Compare commits
No commits in common. "main" and "flagd-proxy/v0.6.3" have entirely different histories.
main
...
flagd-prox
|
@ -15,6 +15,9 @@ on:
|
|||
- "README.md"
|
||||
- "docs/**"
|
||||
|
||||
env:
|
||||
GO_VERSION: '~1.21'
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -27,7 +30,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- run: make workspace-init
|
||||
- run: make lint
|
||||
|
||||
|
@ -39,7 +42,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- run: make workspace-init
|
||||
- run: make generate-docs
|
||||
- name: Check no diff
|
||||
|
@ -57,7 +60,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- run: make workspace-init
|
||||
- run: make test
|
||||
- name: Upload coverage to Codecov
|
||||
|
@ -75,7 +78,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@master
|
||||
|
@ -95,15 +98,13 @@ jobs:
|
|||
tags: flagd-local:test
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@0.28.0
|
||||
uses: aquasecurity/trivy-action@master
|
||||
with:
|
||||
input: ${{ github.workspace }}/flagd-local.tar
|
||||
format: "sarif"
|
||||
input: /github/workspace/flagd-local.tar
|
||||
format: "template"
|
||||
template: "@/contrib/sarif.tpl"
|
||||
output: "trivy-results.sarif"
|
||||
severity: "CRITICAL,HIGH"
|
||||
env:
|
||||
# use an alternative trivvy db to avoid rate limits
|
||||
TRIVY_DB_REPOSITORY: public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3
|
||||
|
@ -122,15 +123,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
|
||||
- name: Install envoy
|
||||
run: |
|
||||
wget -O- https://apt.envoyproxy.io/signing.key | sudo gpg --dearmor -o /etc/apt/keyrings/envoy-keyring.gpg
|
||||
echo "deb [signed-by=/etc/apt/keyrings/envoy-keyring.gpg] https://apt.envoyproxy.io jammy main" | sudo tee /etc/apt/sources.list.d/envoy.list
|
||||
sudo apt-get update
|
||||
sudo apt-get install envoy
|
||||
envoy --version
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Workspace init
|
||||
run: make workspace-init
|
||||
|
@ -147,9 +140,5 @@ jobs:
|
|||
-f file:${{ github.workspace }}/test-harness/flags/zero-flags.json \
|
||||
-f file:${{ github.workspace }}/test-harness/flags/edge-case-flags.json &
|
||||
|
||||
- name: Run envoy proxy in background
|
||||
run: |
|
||||
envoy -c ./test/integration/config/envoy.yaml &
|
||||
|
||||
- name: Run evaluation test suite
|
||||
run: go clean -testcache && go test -cover ./test/integration
|
||||
|
|
|
@ -63,7 +63,6 @@ jobs:
|
|||
container-release:
|
||||
name: Build and push containers to GHCR
|
||||
needs: release-please
|
||||
environment: publish
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ needs.release-please.outputs.items_to_publish != '' && toJson(fromJson(needs.release-please.outputs.items_to_publish)) != '[]' }}
|
||||
strategy:
|
||||
|
@ -109,8 +108,6 @@ jobs:
|
|||
context: .
|
||||
file: ./${{ matrix.path }}/build.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
provenance: mode=max
|
||||
sbom: true
|
||||
push: true
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.REPO_OWNER }}/${{ matrix.path }}:latest
|
||||
|
@ -131,12 +128,24 @@ jobs:
|
|||
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
|
||||
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
|
||||
|
||||
- name: Generate image SBOM file name
|
||||
id: image-sbom-file-gen
|
||||
run: echo "IMG_SBOM_FILE=${{ format('{0}-{1}-sbom.spdx', matrix.path, env.VERSION) }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: SBOM for latest image
|
||||
uses: anchore/sbom-action@b6a39da80722a2cb0ef5d197531764a89b5d48c3 # v0
|
||||
with:
|
||||
image: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }}/${{ matrix.path }}:${{ env.VERSION }}
|
||||
artifact-name: ${{ steps.image-sbom-file-gen.outputs.IMG_SBOM_FILE }}
|
||||
output-file: ${{ steps.image-sbom-file-gen.outputs.IMG_SBOM_FILE }}
|
||||
|
||||
- name: Bundle release assets
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
|
||||
with:
|
||||
tag_name: ${{ env.TAG }}
|
||||
files: |
|
||||
${{ env.PUBLIC_KEY_FILE }}
|
||||
${{ steps.image-sbom-file-gen.outputs.IMG_SBOM_FILE }}
|
||||
|
||||
release-go-binaries:
|
||||
name: Create and publish binaries to GitHub
|
||||
|
@ -202,6 +211,19 @@ jobs:
|
|||
run: |
|
||||
env CGO_ENABLED=0 GOOS=windows GOARCH=386 go build ${{ env.BUILD_ARGS }} -o ./${{ matrix.path }}_windows_i386 ./${{ matrix.path }}/main.go
|
||||
zip -r ${{ matrix.path }}_${{ env.VERSION_NO_PREFIX }}_Windows_i386.zip ./${{ matrix.path }}_windows_i386 ./LICENSE ./CHANGELOG.md ./README.md ./sbom.xml
|
||||
# Bundle licenses
|
||||
- name: Install go-licenses
|
||||
run: go install github.com/google/go-licenses@latest
|
||||
- name: Build license extraction locations
|
||||
id: license-files
|
||||
run: |
|
||||
echo "LICENSE_FOLDER=${{ format('{0}-third-party-license', matrix.path) }}" >> $GITHUB_OUTPUT
|
||||
echo "LICENSE_ERROR_FILE=${{ format('{0}-license-errors.txt', matrix.path) }}" >> $GITHUB_OUTPUT
|
||||
- name: Run go-licenses for module ${{ matrix.path }}
|
||||
run: go-licenses save ./${{ matrix.path }} --save_path=./${{ steps.license-files.outputs.LICENSE_FOLDER }} --force --logtostderr=false 2> ./${{ steps.license-files.outputs.LICENSE_ERROR_FILE }}
|
||||
continue-on-error: true # tool set stderr which can be ignored and referred through error artefact
|
||||
- name: Bundle license extracts
|
||||
run: tar czf ./${{ steps.license-files.outputs.LICENSE_FOLDER }}.tar.gz ./${{ steps.license-files.outputs.LICENSE_FOLDER }}
|
||||
# Bundle release artifacts
|
||||
- name: Bundle release assets
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
|
||||
|
@ -211,6 +233,7 @@ jobs:
|
|||
./sbom.xml
|
||||
./*.tar.gz
|
||||
./*.zip
|
||||
./${{ steps.license-files.outputs.LICENSE_ERROR_FILE }}
|
||||
homebrew:
|
||||
name: Bump homebrew-core formula
|
||||
needs: release-please
|
||||
|
|
|
@ -17,10 +17,4 @@ node_modules/
|
|||
|
||||
# built documentation
|
||||
site
|
||||
.cache/
|
||||
|
||||
# coverage results
|
||||
*coverage.out
|
||||
|
||||
# benchmark results
|
||||
benchmark.txt
|
||||
.cache/
|
|
@ -6,4 +6,4 @@
|
|||
url = https://github.com/open-feature/spec.git
|
||||
[submodule "schemas"]
|
||||
path = schemas
|
||||
url = https://github.com/open-feature/flagd-schemas.git
|
||||
url = https://github.com/open-feature/schemas.git
|
||||
|
|
|
@ -1,30 +0,0 @@
|
|||
run:
|
||||
timeout: 3m
|
||||
linters-settings:
|
||||
funlen:
|
||||
statements: 50
|
||||
golint:
|
||||
min-confidence: 0.6
|
||||
enable-all: true
|
||||
issues:
|
||||
exclude:
|
||||
- pkg/generated
|
||||
exclude-rules:
|
||||
- path: _test.go
|
||||
linters:
|
||||
- funlen
|
||||
- maligned
|
||||
- noctx
|
||||
- scopelint
|
||||
- bodyclose
|
||||
- lll
|
||||
- goconst
|
||||
- gocognit
|
||||
- gocyclo
|
||||
- dupl
|
||||
- staticcheck
|
||||
exclude-dirs:
|
||||
- (^|/)bin($|/)
|
||||
- (^|/)examples($|/)
|
||||
- (^|/)schemas($|/)
|
||||
- (^|/)test-harness($|/)
|
108
.golangci.yml
108
.golangci.yml
|
@ -1,43 +1,67 @@
|
|||
version: "2"
|
||||
run:
|
||||
skip-dirs:
|
||||
- (^|/)bin($|/)
|
||||
- (^|/)examples($|/)
|
||||
- (^|/)schemas($|/)
|
||||
- (^|/)test-harness($|/)
|
||||
linters:
|
||||
settings:
|
||||
funlen:
|
||||
statements: 50
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- bodyclose
|
||||
- dupl
|
||||
- funlen
|
||||
- gocognit
|
||||
- goconst
|
||||
- gocyclo
|
||||
- lll
|
||||
- maligned
|
||||
- noctx
|
||||
- scopelint
|
||||
- staticcheck
|
||||
path: _test.go
|
||||
- path: (.+)\.go$
|
||||
text: pkg/generated
|
||||
paths:
|
||||
- (^|/)bin($|/)
|
||||
- (^|/)examples($|/)
|
||||
- (^|/)schemas($|/)
|
||||
- (^|/)test-harness($|/)
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
enable:
|
||||
- asciicheck
|
||||
- asasalint
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- contextcheck
|
||||
- dogsled
|
||||
- dupl
|
||||
- dupword
|
||||
- durationcheck
|
||||
- errchkjson
|
||||
- exhaustive
|
||||
- funlen
|
||||
- gci
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- interfacebloat
|
||||
- gosec
|
||||
- lll
|
||||
- misspell
|
||||
- nakedret
|
||||
- nilerr
|
||||
- nilnil
|
||||
- noctx
|
||||
- nosprintfhostport
|
||||
- prealloc
|
||||
- promlinter
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- exportloopref
|
||||
- stylecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- whitespace
|
||||
- wrapcheck
|
||||
- gofumpt
|
||||
- tenv
|
||||
linters-settings:
|
||||
funlen:
|
||||
statements: 50
|
||||
golint:
|
||||
min-confidence: 0.6
|
||||
issues:
|
||||
exclude:
|
||||
- pkg/generated
|
||||
exclude-rules:
|
||||
- path: _test.go
|
||||
linters:
|
||||
- funlen
|
||||
- maligned
|
||||
- noctx
|
||||
- scopelint
|
||||
- bodyclose
|
||||
- lll
|
||||
- goconst
|
||||
- gocognit
|
||||
- gocyclo
|
||||
- dupl
|
||||
- staticcheck
|
||||
|
|
|
@ -13,9 +13,6 @@ config:
|
|||
max-one-sentence-per-line: true
|
||||
code-block-style: false # not compatible with mkdocs "details" panes
|
||||
no-alt-text: false
|
||||
descriptive-link-text: false
|
||||
MD007:
|
||||
indent: 4
|
||||
|
||||
ignores:
|
||||
- "**/CHANGELOG.md"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
"flagd": "0.12.9",
|
||||
"flagd-proxy": "0.8.0",
|
||||
"core": "0.12.1"
|
||||
"flagd": "0.11.0",
|
||||
"flagd-proxy": "0.6.3",
|
||||
"core": "0.10.0"
|
||||
}
|
|
@ -3,4 +3,4 @@
|
|||
#
|
||||
# Managed by Peribolos: https://github.com/open-feature/community/blob/main/config/open-feature/cloud-native/workgroup.yaml
|
||||
#
|
||||
* @open-feature/flagd-maintainers @open-feature/maintainers
|
||||
* @open-feature/cloud-native-maintainers
|
||||
|
|
|
@ -8,24 +8,6 @@ TLDR: be respectful.
|
|||
Any contributions are expected to include unit tests.
|
||||
These can be validated with `make test` or the automated github workflow will run them on PR creation.
|
||||
|
||||
## Development
|
||||
|
||||
### Prerequisites
|
||||
|
||||
You'll need:
|
||||
|
||||
- Go
|
||||
- make
|
||||
- docker
|
||||
|
||||
You'll want:
|
||||
|
||||
- curl (for calling HTTP endpoints)
|
||||
- [grpcurl](https://github.com/fullstorydev/grpcurl) (for making gRPC calls)
|
||||
- jq (for pretty printing responses)
|
||||
|
||||
### Workspace Initialization
|
||||
|
||||
This project uses a go workspace, to setup the project run
|
||||
|
||||
```shell
|
||||
|
@ -40,70 +22,6 @@ The project uses remote buf packages, changing the remote generation source will
|
|||
export GOPRIVATE=buf.build/gen/go
|
||||
```
|
||||
|
||||
### Manual testing
|
||||
|
||||
flagd has a number of interfaces (you can read more about them at [flagd.dev](https://flagd.dev/)) which can be used to evaluate flags, or deliver flag configurations so that they can be evaluated by _in-process_ providers.
|
||||
|
||||
You can manually test this functionality by starting flagd (from the flagd/ directory) with `go run main.go start -f file:../config/samples/example_flags.flagd.json`.
|
||||
|
||||
NOTE: you will need `go, curl`
|
||||
|
||||
#### Remote single flag evaluation via HTTP1.1/Connect
|
||||
|
||||
```sh
|
||||
# evaluates a single boolean flag
|
||||
curl -X POST -d '{"flagKey":"myBoolFlag","context":{}}' -H "Content-Type: application/json" "http://localhost:8013/flagd.evaluation.v1.Service/ResolveBoolean" | jq
|
||||
```
|
||||
|
||||
#### Remote single flag evaluation via HTTP1.1/OFREP
|
||||
|
||||
```sh
|
||||
# evaluates a single boolean flag
|
||||
curl -X POST -d '{"context":{}}' 'http://localhost:8016/ofrep/v1/evaluate/flags/myBoolFlag' | jq
|
||||
```
|
||||
|
||||
#### Remote single flag evaluation via gRPC
|
||||
|
||||
```sh
|
||||
# evaluates a single boolean flag
|
||||
grpcurl -import-path schemas/protobuf/flagd/evaluation/v1/ -proto evaluation.proto -plaintext -d '{"flagKey":"myBoolFlag"}' localhost:8013 flagd.evaluation.v1.Service/ResolveBoolean | jq
|
||||
```
|
||||
|
||||
#### Remote bulk evaluation via HTTP1.1/OFREP
|
||||
|
||||
```sh
|
||||
# evaluates flags in bulk
|
||||
curl -X POST -d '{"context":{}}' 'http://localhost:8016/ofrep/v1/evaluate/flags' | jq
|
||||
```
|
||||
|
||||
#### Remote bulk evaluation via gRPC
|
||||
|
||||
```sh
|
||||
# evaluates flags in bulk
|
||||
grpcurl -import-path schemas/protobuf/flagd/evaluation/v1/ -proto evaluation.proto -plaintext -d '{}' localhost:8013 flagd.evaluation.v1.Service/ResolveAll | jq
|
||||
```
|
||||
|
||||
#### Remote event streaming via gRPC
|
||||
|
||||
```sh
|
||||
# notifies of flag changes (but does not evaluate)
|
||||
grpcurl -import-path schemas/protobuf/flagd/evaluation/v1/ -proto evaluation.proto -plaintext -d '{}' localhost:8013 flagd.evaluation.v1.Service/EventStream
|
||||
```
|
||||
|
||||
#### Flag configuration fetch via gRPC
|
||||
|
||||
```sh
|
||||
# sends back a representation of all flags
|
||||
grpcurl -import-path schemas/protobuf/flagd/sync/v1/ -proto sync.proto -plaintext localhost:8015 flagd.sync.v1.FlagSyncService/FetchAllFlags | jq
|
||||
```
|
||||
|
||||
#### Flag synchronization stream via gRPC
|
||||
|
||||
```sh
|
||||
# will open a persistent stream which sends flag changes when the watched source is modified
|
||||
grpcurl -import-path schemas/protobuf/flagd/sync/v1/ -proto sync.proto -plaintext localhost:8015 flagd.sync.v1.FlagSyncService/SyncFlags | jq
|
||||
```
|
||||
|
||||
## DCO Sign-Off
|
||||
|
||||
A DCO (Developer Certificate of Origin) sign-off is a line placed at the end of
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
FROM squidfunk/mkdocs-material:9.5
|
||||
RUN pip install mkdocs-include-markdown-plugin
|
||||
RUN pip install mkdocs-include-markdown-plugin
|
24
Makefile
24
Makefile
|
@ -47,19 +47,12 @@ test-flagd:
|
|||
go test -race -covermode=atomic -cover -short ./flagd/pkg/... -coverprofile=flagd-coverage.out
|
||||
test-flagd-proxy:
|
||||
go test -race -covermode=atomic -cover -short ./flagd-proxy/pkg/... -coverprofile=flagd-proxy-coverage.out
|
||||
flagd-benchmark-test:
|
||||
go test -bench=Bench -short -benchtime=5s -benchmem ./core/... | tee benchmark.txt
|
||||
flagd-integration-test-harness:
|
||||
# target used to start a locally built flagd with the e2e flags
|
||||
cd flagd; go run main.go start -f file:../test-harness/flags/testing-flags.json -f file:../test-harness/flags/custom-ops.json -f file:../test-harness/flags/evaluator-refs.json -f file:../test-harness/flags/zero-flags.json -f file:../test-harness/flags/edge-case-flags.json
|
||||
flagd-integration-test: # dependent on flagd-e2e-test-harness if not running in github actions
|
||||
go test -count=1 -cover ./test/integration $(ARGS)
|
||||
flagd-integration-test: # dependent on ./bin/flagd start -f file:test-harness/flags/testing-flags.json -f file:test-harness/flags/custom-ops.json -f file:test-harness/flags/evaluator-refs.json -f file:test-harness/flags/zero-flags.json
|
||||
go test -cover ./test/integration $(ARGS)
|
||||
run: # default to flagd
|
||||
make run-flagd
|
||||
run-flagd:
|
||||
cd flagd; go run main.go start -f file:../config/samples/example_flags.flagd.json
|
||||
run-flagd-selector-demo:
|
||||
cd flagd; go run main.go start -f file:../config/samples/example_flags.flagd.json -f file:../config/samples/example_flags.flagd.2.json
|
||||
cd flagd; go run main.go start -f file:../config/samples/example_flags.flagd.json
|
||||
install:
|
||||
cp systemd/flagd.service /etc/systemd/system/flagd.service
|
||||
mkdir -p /etc/flagd
|
||||
|
@ -73,11 +66,8 @@ uninstall:
|
|||
rm /etc/systemd/system/flagd.service
|
||||
rm -f $(DESTDIR)$(PREFIX)/bin/flagd
|
||||
lint:
|
||||
go install -v github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.2.1
|
||||
$(foreach module, $(ALL_GO_MOD_DIRS), ${GOPATH}/bin/golangci-lint run $(module)/...;)
|
||||
lint-fix:
|
||||
go install -v github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.2.1
|
||||
$(foreach module, $(ALL_GO_MOD_DIRS), ${GOPATH}/bin/golangci-lint run --fix $(module)/...;)
|
||||
go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2
|
||||
$(foreach module, $(ALL_GO_MOD_DIRS), ${GOPATH}/bin/golangci-lint run --deadline=5m --timeout=5m $(module)/... || exit;)
|
||||
install-mockgen:
|
||||
go install go.uber.org/mock/mockgen@v0.4.0
|
||||
mockgen: install-mockgen
|
||||
|
@ -144,8 +134,8 @@ update-public-schema: pull-schemas-submodule
|
|||
|
||||
.PHONY: run-web-docs
|
||||
run-web-docs: generate-docs generate-proto-docs
|
||||
docker build -t flag-docs:latest . --load \
|
||||
&& docker run --rm -it -p 8000:8000 -v ${PWD}:/docs flag-docs:latest
|
||||
docker build -t squidfunk/mkdocs-material . \
|
||||
&& docker run --rm -it -p 8000:8000 -v ${PWD}:/docs squidfunk/mkdocs-material
|
||||
|
||||
# Run the playground app in dev mode
|
||||
# See the readme in the playground-app folder for more details
|
||||
|
|
|
@ -1,72 +0,0 @@
|
|||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/certreloader 15.986s
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/open-feature/flagd/core/pkg/evaluator
|
||||
cpu: 11th Gen Intel(R) Core(TM) i9-11950H @ 2.60GHz
|
||||
BenchmarkFractionalEvaluation/test_a@faas.com-16 423930 13316 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkFractionalEvaluation/test_b@faas.com-16 469594 13677 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkFractionalEvaluation/test_c@faas.com-16 569103 13286 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkFractionalEvaluation/test_d@faas.com-16 412386 13023 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_staticBoolFlag-16 3106903 1792 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_targetingBoolFlag-16 448164 11250 ns/op 6065 B/op 87 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_staticObjectFlag-16 3958750 1476 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_missingFlag-16 5331808 1353 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_disabledFlag-16 4530751 1301 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveStringValue/test_staticStringFlag-16 4583056 1525 ns/op 1040 B/op 13 allocs/op
|
||||
BenchmarkResolveStringValue/test_targetingStringFlag-16 839954 10388 ns/op 6097 B/op 89 allocs/op
|
||||
BenchmarkResolveStringValue/test_staticObjectFlag-16 4252830 1677 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveStringValue/test_missingFlag-16 3743324 1495 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveStringValue/test_disabledFlag-16 3495699 1709 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_staticFloatFlag-16 4382868 1511 ns/op 1024 B/op 13 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_targetingFloatFlag-16 867987 10344 ns/op 6081 B/op 89 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_staticObjectFlag-16 3913120 1695 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_missingFlag-16 3910468 1349 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_disabledFlag-16 3642919 1666 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveIntValue/test_staticIntFlag-16 4077288 1349 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveIntValue/test_targetingNumberFlag-16 922383 7601 ns/op 6065 B/op 87 allocs/op
|
||||
BenchmarkResolveIntValue/test_staticObjectFlag-16 4995128 1229 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveIntValue/test_missingFlag-16 5574153 1274 ns/op 768 B/op 12 allocs/op
|
||||
BenchmarkResolveIntValue/test_disabledFlag-16 3633708 1734 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveObjectValue/test_staticObjectFlag-16 1624102 4559 ns/op 2243 B/op 37 allocs/op
|
||||
BenchmarkResolveObjectValue/test_targetingObjectFlag-16 443880 11995 ns/op 7283 B/op 109 allocs/op
|
||||
BenchmarkResolveObjectValue/test_staticBoolFlag-16 3462445 1665 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveObjectValue/test_missingFlag-16 4207567 1458 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveObjectValue/test_disabledFlag-16 3407262 1848 ns/op 1072 B/op 13 allocs/op
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/evaluator 239.506s
|
||||
? github.com/open-feature/flagd/core/pkg/evaluator/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/logger 0.003s
|
||||
? github.com/open-feature/flagd/core/pkg/model [no test files]
|
||||
? github.com/open-feature/flagd/core/pkg/service [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/service/ofrep 0.002s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/store 0.003s
|
||||
? github.com/open-feature/flagd/core/pkg/sync [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/blob 0.016s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/builder 0.018s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/builder/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/file 1.007s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/grpc 8.011s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/grpc/credentials 0.008s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/grpc/credentials/mock [no test files]
|
||||
? github.com/open-feature/flagd/core/pkg/sync/grpc/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/grpc/nameresolvers 0.002s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/http 4.006s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/http/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/kubernetes 0.016s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/testing [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/telemetry 0.016s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/utils 0.002s
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"$schema": "https://flagd.dev/schema/v0/flags.json",
|
||||
"metadata": {
|
||||
"flagSetId": "other",
|
||||
"version": "v1"
|
||||
},
|
||||
"flags": {
|
||||
"myStringFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"dupe1": "dupe1",
|
||||
"dupe2": "dupe2"
|
||||
},
|
||||
"defaultVariant": "dupe1"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,9 +1,5 @@
|
|||
{
|
||||
"$schema": "https://flagd.dev/schema/v0/flags.json",
|
||||
"metadata": {
|
||||
"flagSetId": "example",
|
||||
"version": "v1"
|
||||
},
|
||||
"flags": {
|
||||
"myBoolFlag": {
|
||||
"state": "ENABLED",
|
||||
|
@ -11,10 +7,7 @@
|
|||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": "on",
|
||||
"metadata": {
|
||||
"version": "v2"
|
||||
}
|
||||
"defaultVariant": "on"
|
||||
},
|
||||
"myStringFlag": {
|
||||
"state": "ENABLED",
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{
|
||||
"$schema": "https://flagd.dev/schema/v0/flags.json",
|
||||
"metadata": {
|
||||
"version": "v2"
|
||||
},
|
||||
"flags": {
|
||||
"myBoolFlag": {
|
||||
"state": "ENABLED",
|
||||
|
|
|
@ -1,295 +1,5 @@
|
|||
# Changelog
|
||||
|
||||
## [0.12.1](https://github.com/open-feature/flagd/compare/core/v0.12.0...core/v0.12.1) (2025-07-28)
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* add back file-delete test ([#1694](https://github.com/open-feature/flagd/issues/1694)) ([750aa17](https://github.com/open-feature/flagd/commit/750aa176b5a8dd24a9daaff985ff6efeb084c758))
|
||||
* fix benchmark ([#1698](https://github.com/open-feature/flagd/issues/1698)) ([5e2d7d7](https://github.com/open-feature/flagd/commit/5e2d7d7176ba05e667cd92acd7decb531a8de2f6))
|
||||
|
||||
## [0.12.0](https://github.com/open-feature/flagd/compare/core/v0.11.8...core/v0.12.0) (2025-07-21)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* remove sync.Type ([#1691](https://github.com/open-feature/flagd/issues/1691))
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* update to latest otel semconv ([#1668](https://github.com/open-feature/flagd/issues/1668)) ([81855d7](https://github.com/open-feature/flagd/commit/81855d76f94a09251a19a05f830cc1d11ab6b566))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* Add support for HTTP eTag header and 304 no change response ([#1645](https://github.com/open-feature/flagd/issues/1645)) ([ea3be4f](https://github.com/open-feature/flagd/commit/ea3be4f9010644132795bb60b36fb7705f901b62))
|
||||
* remove sync.Type ([#1691](https://github.com/open-feature/flagd/issues/1691)) ([ac647e0](https://github.com/open-feature/flagd/commit/ac647e065636071f5bc065a9a084461cea692166))
|
||||
|
||||
## [0.11.8](https://github.com/open-feature/flagd/compare/core/v0.11.7...core/v0.11.8) (2025-07-15)
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to 08b4c52 ([#1682](https://github.com/open-feature/flagd/issues/1682)) ([68d04e2](https://github.com/open-feature/flagd/commit/68d04e21e63c63d6054fcd6aebfb864e8b3a597e))
|
||||
|
||||
## [0.11.7](https://github.com/open-feature/flagd/compare/core/v0.11.6...core/v0.11.7) (2025-07-15)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* general err if targeting variant not in variants ([#1680](https://github.com/open-feature/flagd/issues/1680)) ([6cabfc8](https://github.com/open-feature/flagd/commit/6cabfc8ff3bd4ad69699a72724495e84cdec0cc3))
|
||||
|
||||
## [0.11.6](https://github.com/open-feature/flagd/compare/core/v0.11.5...core/v0.11.6) (2025-07-10)
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add sync_context to SyncFlags ([#1642](https://github.com/open-feature/flagd/issues/1642)) ([07a45d9](https://github.com/open-feature/flagd/commit/07a45d9b2275584fa92ff33cbe5e5c7d7864db38))
|
||||
* allowing null/missing defaultValue ([#1659](https://github.com/open-feature/flagd/issues/1659)) ([3f6b78c](https://github.com/open-feature/flagd/commit/3f6b78c8ccab75e9c07d26741c4b206fd0b722ee))
|
||||
|
||||
## [0.11.5](https://github.com/open-feature/flagd/compare/core/v0.11.4...core/v0.11.5) (2025-06-13)
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add server-side deadline to sync service ([#1638](https://github.com/open-feature/flagd/issues/1638)) ([b70fa06](https://github.com/open-feature/flagd/commit/b70fa06b66e1fe8a28728441a7ccd28c6fe6a0c6))
|
||||
* updating context using headers ([#1641](https://github.com/open-feature/flagd/issues/1641)) ([ba34815](https://github.com/open-feature/flagd/commit/ba348152b6e7b6bd7473bb11846aac7db316c88e))
|
||||
|
||||
## [0.11.4](https://github.com/open-feature/flagd/compare/core/v0.11.3...core/v0.11.4) (2025-05-28)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* incorrect comparison used for time ([#1608](https://github.com/open-feature/flagd/issues/1608)) ([8c5ac2f](https://github.com/open-feature/flagd/commit/8c5ac2f2c31e092cbe6ddb4d3c1adeeeb04e9ef9))
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update dependency go to v1.24.1 ([#1559](https://github.com/open-feature/flagd/issues/1559)) ([cd46044](https://github.com/open-feature/flagd/commit/cd4604471bba0a1df67bf87653a38df3caf9d20f))
|
||||
* **security:** upgrade dependency versions ([#1632](https://github.com/open-feature/flagd/issues/1632)) ([761d870](https://github.com/open-feature/flagd/commit/761d870a3c563b8eb1b83ee543b41316c98a1d48))
|
||||
|
||||
|
||||
### 🔄 Refactoring
|
||||
|
||||
* Refactor the cron function in http sync ([#1600](https://github.com/open-feature/flagd/issues/1600)) ([babcacf](https://github.com/open-feature/flagd/commit/babcacfe4dd1244dda954823d8a3ed2019c8752b))
|
||||
* removed hardcoded metric export interval and use otel default ([#1621](https://github.com/open-feature/flagd/issues/1621)) ([81c66eb](https://github.com/open-feature/flagd/commit/81c66ebf2b82fc6874ab325569f52801d5ab8e5e))
|
||||
|
||||
## [0.11.3](https://github.com/open-feature/flagd/compare/core/v0.11.2...core/v0.11.3) (2025-03-25)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to 9b0ee43 ([#1598](https://github.com/open-feature/flagd/issues/1598)) ([0587ce4](https://github.com/open-feature/flagd/commit/0587ce44e60b643ff6960c1eaf4461f933ea95b7))
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to e840a03 ([#1587](https://github.com/open-feature/flagd/issues/1587)) ([9ee0c57](https://github.com/open-feature/flagd/commit/9ee0c573d6dbfa0c4e9b18c9da7313094ea56916))
|
||||
* **deps:** update module connectrpc.com/otelconnect to v0.7.2 ([#1574](https://github.com/open-feature/flagd/issues/1574)) ([6094dce](https://github.com/open-feature/flagd/commit/6094dce5c0472f593b79d6d40e080f9b8d6503e5))
|
||||
* **deps:** update module github.com/google/go-cmp to v0.7.0 ([#1569](https://github.com/open-feature/flagd/issues/1569)) ([6e9dbd2](https://github.com/open-feature/flagd/commit/6e9dbd2dbf8365f839e353f53cb638847a1f05d6))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.21.1 ([#1576](https://github.com/open-feature/flagd/issues/1576)) ([cd95193](https://github.com/open-feature/flagd/commit/cd95193f71fd465ffd1b177fa492aa84d8414a87))
|
||||
* **deps:** update module google.golang.org/grpc to v1.71.0 ([#1578](https://github.com/open-feature/flagd/issues/1578)) ([5c2c64f](https://github.com/open-feature/flagd/commit/5c2c64f878b8603dd37cbfd79b0e1588e4b5a3c6))
|
||||
* incorrect metadata returned per source ([#1599](https://github.com/open-feature/flagd/issues/1599)) ([b333e11](https://github.com/open-feature/flagd/commit/b333e11ecfe54f72c44ee61b3dcb1f2a487c94d4))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* accept version numbers which are not strings ([#1589](https://github.com/open-feature/flagd/issues/1589)) ([6a13796](https://github.com/open-feature/flagd/commit/6a137967a258e799cbac9e3bb3927a07412c2a7b))
|
||||
|
||||
## [0.11.2](https://github.com/open-feature/flagd/compare/core/v0.11.1...core/v0.11.2) (2025-02-21)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update golang.org/x/exp digest to 939b2ce ([#1555](https://github.com/open-feature/flagd/issues/1555)) ([23afa9c](https://github.com/open-feature/flagd/commit/23afa9c18c27885bdae0f5c4ebdc30e780e9da71))
|
||||
* **deps:** update golang.org/x/exp digest to f9890c6 ([#1551](https://github.com/open-feature/flagd/issues/1551)) ([02c4b42](https://github.com/open-feature/flagd/commit/02c4b4250131ca819c85dcf10c2d78e0c218469f))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.5-20250127221518-be6d1143b690.1 ([#1549](https://github.com/open-feature/flagd/issues/1549)) ([d3eb44e](https://github.com/open-feature/flagd/commit/d3eb44ed45a54bd9152b7477cce17be90016683c))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.4 ([#1556](https://github.com/open-feature/flagd/issues/1556)) ([0dfa799](https://github.com/open-feature/flagd/commit/0dfa79956695849f3a703554525759093931a01d))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.21.0 ([#1568](https://github.com/open-feature/flagd/issues/1568)) ([a3d4162](https://github.com/open-feature/flagd/commit/a3d41625a2b79452c0732af29d0b4f320e74fe8b))
|
||||
* **deps:** update module golang.org/x/crypto to v0.33.0 ([#1552](https://github.com/open-feature/flagd/issues/1552)) ([7cef153](https://github.com/open-feature/flagd/commit/7cef153a275a4fac5099f5a52013dcd227a79bb3))
|
||||
* **deps:** update module golang.org/x/mod to v0.23.0 ([#1544](https://github.com/open-feature/flagd/issues/1544)) ([6fe7bd2](https://github.com/open-feature/flagd/commit/6fe7bd2a3e82dfc81068d9d95d8c3a4acc16456c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* Adding gRPC dial option override to grpc_sync.go ([#1563](https://github.com/open-feature/flagd/issues/1563)) ([1a97ca5](https://github.com/open-feature/flagd/commit/1a97ca5f81582e6d1f139a61e0e49007ad173d3f))
|
||||
|
||||
## [0.11.1](https://github.com/open-feature/flagd/compare/core/v0.11.0...core/v0.11.1) (2025-02-04)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module golang.org/x/sync to v0.11.0 ([#1543](https://github.com/open-feature/flagd/issues/1543)) ([7d6c0dc](https://github.com/open-feature/flagd/commit/7d6c0dc6e6e6955af1e5225807deeb2b6797900b))
|
||||
|
||||
## [0.11.0](https://github.com/open-feature/flagd/compare/core/v0.10.8...core/v0.11.0) (2025-01-31)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* flagSetMetadata in OFREP/ResolveAll, core refactors ([#1540](https://github.com/open-feature/flagd/issues/1540))
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to bb76343 ([#1534](https://github.com/open-feature/flagd/issues/1534)) ([8303353](https://github.com/open-feature/flagd/commit/8303353a1b503ef34b8e46d9bf77ce53c067ef3b))
|
||||
* **deps:** update golang.org/x/exp digest to 3edf0e9 ([#1538](https://github.com/open-feature/flagd/issues/1538)) ([7a06567](https://github.com/open-feature/flagd/commit/7a0656713a8c2ac3d456a3a300fe137debee0edd))
|
||||
* **deps:** update golang.org/x/exp digest to e0ece0d ([#1539](https://github.com/open-feature/flagd/issues/1539)) ([4281c6e](https://github.com/open-feature/flagd/commit/4281c6e80b233a162436fea3640bf5d061d40b96))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20250127221518-be6d1143b690.2 ([#1536](https://github.com/open-feature/flagd/issues/1536)) ([e23060f](https://github.com/open-feature/flagd/commit/e23060f24b2a714ae748e6b37d0d06b7caa1c95c))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.4-20241220192239-696330adaff0.1 ([#1529](https://github.com/open-feature/flagd/issues/1529)) ([8881a80](https://github.com/open-feature/flagd/commit/8881a804b4055da0127a16b8fc57022d24906e1b))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.4-20250127221518-be6d1143b690.1 ([#1537](https://github.com/open-feature/flagd/issues/1537)) ([f74207b](https://github.com/open-feature/flagd/commit/f74207bc13b75bae4275bc486df51e2da569dd41))
|
||||
* **deps:** update module google.golang.org/grpc to v1.70.0 ([#1528](https://github.com/open-feature/flagd/issues/1528)) ([79b2b0a](https://github.com/open-feature/flagd/commit/79b2b0a6bbd48676dcbdd2393feb8247529bf29c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* flagSetMetadata in OFREP/ResolveAll, core refactors ([#1540](https://github.com/open-feature/flagd/issues/1540)) ([b49abf9](https://github.com/open-feature/flagd/commit/b49abf95069da93bdf8369c8aa0ae40e698df760))
|
||||
* support yaml in blob, file, and http syncs ([#1522](https://github.com/open-feature/flagd/issues/1522)) ([76d673a](https://github.com/open-feature/flagd/commit/76d673ae8f765512270e6498569c0ce3d54a60bf))
|
||||
|
||||
## [0.10.8](https://github.com/open-feature/flagd/compare/core/v0.10.7...core/v0.10.8) (2025-01-19)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.3 ([#1520](https://github.com/open-feature/flagd/issues/1520)) ([db2f990](https://github.com/open-feature/flagd/commit/db2f99021dfd676d2fd0c6af6af7e77783ee31ce))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1524](https://github.com/open-feature/flagd/issues/1524)) ([eeae9a6](https://github.com/open-feature/flagd/commit/eeae9a64caf93356fd663cc735cc422edcf9e132))
|
||||
|
||||
## [0.10.7](https://github.com/open-feature/flagd/compare/core/v0.10.6...core/v0.10.7) (2025-01-16)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.3-20241220192239-696330adaff0.1 ([#1513](https://github.com/open-feature/flagd/issues/1513)) ([64c5787](https://github.com/open-feature/flagd/commit/64c57875b032edcef2e2d230e7735990e01b72b8))
|
||||
|
||||
## [0.10.6](https://github.com/open-feature/flagd/compare/core/v0.10.5...core/v0.10.6) (2025-01-15)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to 37baa2c ([#1499](https://github.com/open-feature/flagd/issues/1499)) ([1a853f7](https://github.com/open-feature/flagd/commit/1a853f79dc41523fd6dcb1ae6ca9745947955cbc))
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to b81a56e ([#1391](https://github.com/open-feature/flagd/issues/1391)) ([6a3d8ac](https://github.com/open-feature/flagd/commit/6a3d8ac2511c32bd0dc77bba0169679aa9bf6ca6))
|
||||
* **deps:** update golang.org/x/exp digest to 7588d65 ([#1495](https://github.com/open-feature/flagd/issues/1495)) ([242e594](https://github.com/open-feature/flagd/commit/242e59450c71c682b56e554830ea3003bdbf9622))
|
||||
* **deps:** update golang.org/x/exp digest to b2144cd ([#1320](https://github.com/open-feature/flagd/issues/1320)) ([a692b00](https://github.com/open-feature/flagd/commit/a692b009ae8e7dc928d0fd65236b404192c99562))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20241220192239-696330adaff0.1 ([#1489](https://github.com/open-feature/flagd/issues/1489)) ([53add83](https://github.com/open-feature/flagd/commit/53add83a491c6e00e0d9b1b64a9461e5973edca7))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20241220192239-696330adaff0.2 ([#1492](https://github.com/open-feature/flagd/issues/1492)) ([9f1d94a](https://github.com/open-feature/flagd/commit/9f1d94a42ac00ecf5fc58c07a76c350e2e4ec2f6))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.0-20241220192239-696330adaff0.1 ([#1490](https://github.com/open-feature/flagd/issues/1490)) ([6edce72](https://github.com/open-feature/flagd/commit/6edce72e8cff01ea13cbd15d604b35ccc8337f50))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.2-20241220192239-696330adaff0.1 ([#1502](https://github.com/open-feature/flagd/issues/1502)) ([426c36e](https://github.com/open-feature/flagd/commit/426c36e838b9ded3a23f933e66e963c8110c0ddb))
|
||||
* **deps:** update module connectrpc.com/connect to v1.18.1 ([#1507](https://github.com/open-feature/flagd/issues/1507)) ([89d3259](https://github.com/open-feature/flagd/commit/89d32591db784458ce9b4cca36662ea502418bc5))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.0 ([#1496](https://github.com/open-feature/flagd/issues/1496)) ([e1fe149](https://github.com/open-feature/flagd/commit/e1fe1490fd1c26b9c566ff5ddef666c0fa74b2d5))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.1 ([#1509](https://github.com/open-feature/flagd/issues/1509)) ([9d06812](https://github.com/open-feature/flagd/commit/9d0681270f26bb91777fa2b8a792a4b0ccd07304))
|
||||
* **deps:** update module golang.org/x/crypto to v0.32.0 ([#1497](https://github.com/open-feature/flagd/issues/1497)) ([63a34d2](https://github.com/open-feature/flagd/commit/63a34d23aedcd798ff9f4cd47cdaddca35416423))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.2 ([#1484](https://github.com/open-feature/flagd/issues/1484)) ([6b40ad3](https://github.com/open-feature/flagd/commit/6b40ad34c83da4a3116e7cad4139a63a6c918097))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.4 ([#1510](https://github.com/open-feature/flagd/issues/1510)) ([76d6353](https://github.com/open-feature/flagd/commit/76d6353840ab8e7c93bdb0802eb1c49fc6fe1dc0))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1470](https://github.com/open-feature/flagd/issues/1470)) ([26b0b1a](https://github.com/open-feature/flagd/commit/26b0b1af8bc4b3a393c3453784b50f167f13f743))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add ssl support to sync service ([#1479](https://github.com/open-feature/flagd/issues/1479)) ([#1501](https://github.com/open-feature/flagd/issues/1501)) ([d50fcc8](https://github.com/open-feature/flagd/commit/d50fcc821c1ae043cb8cf77e464f7b738e2ff755))
|
||||
* support flag metadata ([#1476](https://github.com/open-feature/flagd/issues/1476)) ([13fbbad](https://github.com/open-feature/flagd/commit/13fbbad4d849b35884f429c0e74a71ece9cce2c9))
|
||||
|
||||
## [0.10.5](https://github.com/open-feature/flagd/compare/core/v0.10.4...core/v0.10.5) (2024-12-17)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update kubernetes packages to v0.31.2 ([#1430](https://github.com/open-feature/flagd/issues/1430)) ([0df8622](https://github.com/open-feature/flagd/commit/0df862215563545c33f518ab7a5ad42a19bf6adb))
|
||||
* **deps:** update kubernetes packages to v0.31.3 ([#1454](https://github.com/open-feature/flagd/issues/1454)) ([f56d7b0](https://github.com/open-feature/flagd/commit/f56d7b043c2d80ae4fe27e996c05a7cc1c2c1b28))
|
||||
* **deps:** update kubernetes packages to v0.31.4 ([#1461](https://github.com/open-feature/flagd/issues/1461)) ([431fbb4](https://github.com/open-feature/flagd/commit/431fbb49513bcdb21b09845f47c26e51e7e9f21b))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.35.2-20240906125204-0a6a901b42e8.1 ([#1451](https://github.com/open-feature/flagd/issues/1451)) ([8c6d91d](https://github.com/open-feature/flagd/commit/8c6d91d538d226b10cb954c23409902e9d245cda))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.0-20240906125204-0a6a901b42e8.1 ([#1475](https://github.com/open-feature/flagd/issues/1475)) ([0b11c6c](https://github.com/open-feature/flagd/commit/0b11c6cf612b244bda6bab119814647f3ce8de2e))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.6.0 ([#1460](https://github.com/open-feature/flagd/issues/1460)) ([dbc1da4](https://github.com/open-feature/flagd/commit/dbc1da4ba984c06972b57cf990d1d31c4b8323df))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.6.1 ([#1473](https://github.com/open-feature/flagd/issues/1473)) ([a3d899c](https://github.com/open-feature/flagd/commit/a3d899c5f8952181a6a987436e2255c2ab9176c5))
|
||||
* **deps:** update module github.com/fsnotify/fsnotify to v1.8.0 ([#1438](https://github.com/open-feature/flagd/issues/1438)) ([949c73b](https://github.com/open-feature/flagd/commit/949c73bd6ebadb30cfa3b7573b43d722f8d2a93d))
|
||||
* **deps:** update module github.com/stretchr/testify to v1.10.0 ([#1455](https://github.com/open-feature/flagd/issues/1455)) ([8c843df](https://github.com/open-feature/flagd/commit/8c843df7714b1f2d120c5cac8e40c7220cc0c05b))
|
||||
* **deps:** update module golang.org/x/crypto to v0.29.0 ([#1443](https://github.com/open-feature/flagd/issues/1443)) ([db96dd5](https://github.com/open-feature/flagd/commit/db96dd57b9de032fc4d15931bf907a7ed962f81b))
|
||||
* **deps:** update module golang.org/x/crypto to v0.30.0 ([#1457](https://github.com/open-feature/flagd/issues/1457)) ([dbdaa19](https://github.com/open-feature/flagd/commit/dbdaa199f0667f16d2a3b91867535ce93e63373c))
|
||||
* **deps:** update module golang.org/x/crypto to v0.31.0 ([#1463](https://github.com/open-feature/flagd/issues/1463)) ([b2245d7](https://github.com/open-feature/flagd/commit/b2245d7f73f1bde859b9627d337dd09ecd2f1a31))
|
||||
* **deps:** update module golang.org/x/mod to v0.22.0 ([#1444](https://github.com/open-feature/flagd/issues/1444)) ([ed064e1](https://github.com/open-feature/flagd/commit/ed064e134fb3a5edb0ec2d976f136af7e94d7f6d))
|
||||
* **deps:** update module google.golang.org/grpc to v1.68.0 ([#1442](https://github.com/open-feature/flagd/issues/1442)) ([cd27d09](https://github.com/open-feature/flagd/commit/cd27d098e6d8d8b0f681ef42d26dba1ebac67d12))
|
||||
* **deps:** update module google.golang.org/grpc to v1.68.1 ([#1456](https://github.com/open-feature/flagd/issues/1456)) ([0b6e2a1](https://github.com/open-feature/flagd/commit/0b6e2a1cd64910226d348c921b08a6de8013ac90))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.0 ([#1469](https://github.com/open-feature/flagd/issues/1469)) ([dd4869f](https://github.com/open-feature/flagd/commit/dd4869f5e095066f80c9d82d1be83155e7504d88))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1447](https://github.com/open-feature/flagd/issues/1447)) ([68b5794](https://github.com/open-feature/flagd/commit/68b5794180da84af9adc1f2cd80f929489969c1c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add context-value flag ([#1448](https://github.com/open-feature/flagd/issues/1448)) ([7ca092e](https://github.com/open-feature/flagd/commit/7ca092e478c937eca0c91357394499763545dc1c))
|
||||
* s3 support for the blob sync ([#1449](https://github.com/open-feature/flagd/issues/1449)) ([a9f7261](https://github.com/open-feature/flagd/commit/a9f7261e75bc064947ae14900e5c4edc4b49bec4))
|
||||
|
||||
## [0.10.4](https://github.com/open-feature/flagd/compare/core/v0.10.3...core/v0.10.4) (2024-10-28)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.35.1-20240906125204-0a6a901b42e8.1 ([#1420](https://github.com/open-feature/flagd/issues/1420)) ([1f06d5a](https://github.com/open-feature/flagd/commit/1f06d5a1837ea2b753974e96c2a1154d6cb3e582))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.20.5 ([#1425](https://github.com/open-feature/flagd/issues/1425)) ([583ba89](https://github.com/open-feature/flagd/commit/583ba894f2de794b36b6a1cc3bfceb9c46dc9d96))
|
||||
* **deps:** update module go.uber.org/mock to v0.5.0 ([#1427](https://github.com/open-feature/flagd/issues/1427)) ([0c6fd7f](https://github.com/open-feature/flagd/commit/0c6fd7fa688db992d4e58a202889cbfea07eebf6))
|
||||
* **deps:** update module gocloud.dev to v0.40.0 ([#1422](https://github.com/open-feature/flagd/issues/1422)) ([e0e4709](https://github.com/open-feature/flagd/commit/e0e4709243d8301bcbb0aaaa309be66944c1d9ed))
|
||||
* **deps:** update module golang.org/x/crypto to v0.28.0 ([#1416](https://github.com/open-feature/flagd/issues/1416)) ([fb272da](https://github.com/open-feature/flagd/commit/fb272da56e0eba12245309899888c18920b9a200))
|
||||
* **deps:** update module google.golang.org/grpc to v1.67.1 ([#1415](https://github.com/open-feature/flagd/issues/1415)) ([85a3a6b](https://github.com/open-feature/flagd/commit/85a3a6b46233fcc7cf71a0292b46c82ac8e66d7b))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* added custom grpc resolver ([#1424](https://github.com/open-feature/flagd/issues/1424)) ([e5007e2](https://github.com/open-feature/flagd/commit/e5007e2bcb6f049a3c54e09331065bb9abe215be))
|
||||
* support azure blob sync ([#1428](https://github.com/open-feature/flagd/issues/1428)) ([5c39cfe](https://github.com/open-feature/flagd/commit/5c39cfe30a3dead4f6db2c6f9ee4c12193cd479b))
|
||||
|
||||
## [0.10.3](https://github.com/open-feature/flagd/compare/core/v0.10.2...core/v0.10.3) (2024-09-23)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update kubernetes package and controller runtime, fix proto lint ([#1290](https://github.com/open-feature/flagd/issues/1290)) ([94860d6](https://github.com/open-feature/flagd/commit/94860d6ceabe9eb7c1e5dd8ea139a796710d6d8b))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20240906125204-0a6a901b42e8.1 ([#1400](https://github.com/open-feature/flagd/issues/1400)) ([954d972](https://github.com/open-feature/flagd/commit/954d97238210f90b650493ae76277d4a8d80788a))
|
||||
* **deps:** update module connectrpc.com/connect to v1.17.0 ([#1408](https://github.com/open-feature/flagd/issues/1408)) ([e7eb691](https://github.com/open-feature/flagd/commit/e7eb691094dfbf02e37d79c41f60f556415e7640))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.20.3 ([#1384](https://github.com/open-feature/flagd/issues/1384)) ([8fd16b2](https://github.com/open-feature/flagd/commit/8fd16b23b1fa8517128af36b3068ca18ebbad6c3))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.20.4 ([#1406](https://github.com/open-feature/flagd/issues/1406)) ([a0a6426](https://github.com/open-feature/flagd/commit/a0a64269b08251317676075fdea7bc65bea8a8dc))
|
||||
* **deps:** update module gocloud.dev to v0.39.0 ([#1404](https://github.com/open-feature/flagd/issues/1404)) ([a3184d6](https://github.com/open-feature/flagd/commit/a3184d68413749808709baac47df3bf7400f9cdc))
|
||||
* **deps:** update module golang.org/x/crypto to v0.27.0 ([#1396](https://github.com/open-feature/flagd/issues/1396)) ([f9a7d10](https://github.com/open-feature/flagd/commit/f9a7d10590d3191ea8eba0dbb340fa94d07026a4))
|
||||
* **deps:** update module golang.org/x/mod to v0.21.0 ([#1397](https://github.com/open-feature/flagd/issues/1397)) ([1507e19](https://github.com/open-feature/flagd/commit/1507e19e9304bcebfbbe4376f45e9f2e82135fd2))
|
||||
* **deps:** update module google.golang.org/grpc to v1.66.0 ([#1393](https://github.com/open-feature/flagd/issues/1393)) ([c96e9d7](https://github.com/open-feature/flagd/commit/c96e9d764aa51caf00fbde07cdc7d2de55b98b9e))
|
||||
* **deps:** update module google.golang.org/grpc to v1.66.1 ([#1402](https://github.com/open-feature/flagd/issues/1402)) ([50c9cd3](https://github.com/open-feature/flagd/commit/50c9cd3ada2f470a22374392a5a152a487636645))
|
||||
* **deps:** update module google.golang.org/grpc to v1.66.2 ([#1405](https://github.com/open-feature/flagd/issues/1405)) ([69ec28f](https://github.com/open-feature/flagd/commit/69ec28fceb597bdaad63b184943b66ccdb4af0b7))
|
||||
* **deps:** update module google.golang.org/grpc to v1.67.0 ([#1407](https://github.com/open-feature/flagd/issues/1407)) ([1ad6480](https://github.com/open-feature/flagd/commit/1ad6480a0f37c4677e53065ef455f615b26b1f17))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1387](https://github.com/open-feature/flagd/issues/1387)) ([22aef5b](https://github.com/open-feature/flagd/commit/22aef5bbf030c619e48fbe22a16d83e071b11902))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1403](https://github.com/open-feature/flagd/issues/1403)) ([fc4cd3e](https://github.com/open-feature/flagd/commit/fc4cd3e547f4826ea0bb8cc1bb2304807932b4e6))
|
||||
* remove dep cycle with certreloader ([#1410](https://github.com/open-feature/flagd/issues/1410)) ([5244f6f](https://github.com/open-feature/flagd/commit/5244f6f6c94f310fd80c7ab84942103cc8c18a39))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add mTLS support to otel exporter ([#1389](https://github.com/open-feature/flagd/issues/1389)) ([8737f53](https://github.com/open-feature/flagd/commit/8737f53444016b114ee4ae52eead0b835af0e200))
|
||||
|
||||
## [0.10.2](https://github.com/open-feature/flagd/compare/core/v0.10.1...core/v0.10.2) (2024-08-22)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20240215170432-1e611e2999cc.1 ([#1372](https://github.com/open-feature/flagd/issues/1372)) ([ae24595](https://github.com/open-feature/flagd/commit/ae2459504f7eccafebccec83fa1f72b08f41a978))
|
||||
* **deps:** update module connectrpc.com/otelconnect to v0.7.1 ([#1367](https://github.com/open-feature/flagd/issues/1367)) ([184915b](https://github.com/open-feature/flagd/commit/184915b31726729e8ed2f7999f338bf4ed684809))
|
||||
* **deps:** update module github.com/open-feature/open-feature-operator/apis to v0.2.44 ([#1368](https://github.com/open-feature/flagd/issues/1368)) ([0c68726](https://github.com/open-feature/flagd/commit/0c68726bed1cdae07f1b90447818ebbc9dc45caf))
|
||||
* **deps:** update module golang.org/x/crypto to v0.26.0 ([#1379](https://github.com/open-feature/flagd/issues/1379)) ([05f6658](https://github.com/open-feature/flagd/commit/05f6658e3dc72182adbff9197c8980641af8c53f))
|
||||
* **deps:** update module golang.org/x/mod to v0.20.0 ([#1377](https://github.com/open-feature/flagd/issues/1377)) ([797d7a4](https://github.com/open-feature/flagd/commit/797d7a4bbafc73e6882e5998df500ae4fe98fbbc))
|
||||
* **deps:** update module golang.org/x/sync to v0.8.0 ([#1378](https://github.com/open-feature/flagd/issues/1378)) ([4804c17](https://github.com/open-feature/flagd/commit/4804c17a67ea9761079ecade34ccb3446643050b))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add 'watcher' interface to file sync ([#1365](https://github.com/open-feature/flagd/issues/1365)) ([61fff43](https://github.com/open-feature/flagd/commit/61fff43e288daac88efb127ada20276c01ed5928))
|
||||
* added new grpc sync config option to allow setting max receive message size. ([#1358](https://github.com/open-feature/flagd/issues/1358)) ([bed077b](https://github.com/open-feature/flagd/commit/bed077bac9da3b6e3bd45ca54046e40a595fcba6))
|
||||
* Support blob type sources and GCS as an example of such source. ([#1366](https://github.com/open-feature/flagd/issues/1366)) ([21f2c9a](https://github.com/open-feature/flagd/commit/21f2c9a5d64cbfe2fc841080850a2c582e8f4ba6))
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update dependency go to v1.22.6 ([#1297](https://github.com/open-feature/flagd/issues/1297)) ([50b92c1](https://github.com/open-feature/flagd/commit/50b92c17cfd872d3e6b95fef3b3d96444e563715))
|
||||
|
||||
## [0.10.1](https://github.com/open-feature/flagd/compare/core/v0.10.0...core/v0.10.1) (2024-07-08)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.4.0-20240215170432-1e611e2999cc.2 ([#1342](https://github.com/open-feature/flagd/issues/1342)) ([efdd921](https://github.com/open-feature/flagd/commit/efdd92139903b89ac986a62ff2cf4f5cfef91cde))
|
||||
* **deps:** update module golang.org/x/crypto to v0.25.0 ([#1351](https://github.com/open-feature/flagd/issues/1351)) ([450cbc8](https://github.com/open-feature/flagd/commit/450cbc84ca55eef3fccc768003e358a8e589668e))
|
||||
* **deps:** update module golang.org/x/mod to v0.19.0 ([#1349](https://github.com/open-feature/flagd/issues/1349)) ([6ee89b4](https://github.com/open-feature/flagd/commit/6ee89b44ca4aca8f6236603fc3f969e814907bd6))
|
||||
* **deps:** update module google.golang.org/grpc to v1.65.0 ([#1346](https://github.com/open-feature/flagd/issues/1346)) ([72a6b87](https://github.com/open-feature/flagd/commit/72a6b876e880ff0b43440d9b63710c7a87536988))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1347](https://github.com/open-feature/flagd/issues/1347)) ([37fb3cd](https://github.com/open-feature/flagd/commit/37fb3cd81d5436e9d8cd3ea490a3951ae5794130))
|
||||
|
||||
## [0.10.0](https://github.com/open-feature/flagd/compare/core/v0.9.3...core/v0.10.0) (2024-06-27)
|
||||
|
||||
|
||||
|
|
193
core/go.mod
193
core/go.mod
|
@ -1,167 +1,108 @@
|
|||
module github.com/open-feature/flagd/core
|
||||
|
||||
go 1.24.0
|
||||
go 1.21
|
||||
|
||||
toolchain go1.24.4
|
||||
toolchain go1.21.4
|
||||
|
||||
require (
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20250529171031-ebdc14163473.2
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.36.6-20250529171031-ebdc14163473.1
|
||||
connectrpc.com/connect v1.18.1
|
||||
connectrpc.com/otelconnect v0.7.2
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.8.4
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20250707123415-08b4c52d3b86
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.45
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.4.0-20240215170432-1e611e2999cc.1
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.34.2-20240215170432-1e611e2999cc.2
|
||||
connectrpc.com/connect v1.16.2
|
||||
connectrpc.com/otelconnect v0.7.0
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.5.3
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20240527214546-61523e5efe3e
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.43
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/robfig/cron v1.2.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/twmb/murmur3 v1.1.8
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
github.com/zeebo/xxh3 v1.0.2
|
||||
go.opentelemetry.io/otel v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.0
|
||||
go.opentelemetry.io/otel/metric v1.37.0
|
||||
go.opentelemetry.io/otel/sdk v1.37.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0
|
||||
go.opentelemetry.io/otel/trace v1.37.0
|
||||
go.uber.org/mock v0.5.2
|
||||
go.opentelemetry.io/otel v1.27.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.49.0
|
||||
go.opentelemetry.io/otel/metric v1.27.0
|
||||
go.opentelemetry.io/otel/sdk v1.27.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.27.0
|
||||
go.opentelemetry.io/otel/trace v1.27.0
|
||||
go.uber.org/mock v0.4.0
|
||||
go.uber.org/zap v1.27.0
|
||||
gocloud.dev v0.42.0
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
|
||||
golang.org/x/mod v0.25.0
|
||||
golang.org/x/sync v0.15.0
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
golang.org/x/crypto v0.24.0
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
||||
golang.org/x/mod v0.18.0
|
||||
golang.org/x/sync v0.7.0
|
||||
google.golang.org/grpc v1.64.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/apimachinery v0.33.2
|
||||
k8s.io/client-go v0.33.2
|
||||
k8s.io/apimachinery v0.29.3
|
||||
k8s.io/client-go v0.29.3
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.23.0 // indirect
|
||||
cloud.google.com/go v0.121.1 // indirect
|
||||
cloud.google.com/go/auth v0.16.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
cloud.google.com/go/storage v1.55.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.65 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
|
||||
github.com/aws/smithy-go v1.22.3 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.8.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig v2.20.0+incompatible // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/google/wire v0.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/huandu/xstrings v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.53.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/api v0.235.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.20.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/api v0.33.2 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.19.3 // indirect
|
||||
sigs.k8s.io/gateway-api v1.2.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.29.3 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.29.2 // indirect
|
||||
k8s.io/component-base v0.29.2 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect
|
||||
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.17.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
2590
core/go.sum
2590
core/go.sum
File diff suppressed because it is too large
Load Diff
|
@ -1,69 +0,0 @@
|
|||
package certreloader
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
KeyPath string
|
||||
CertPath string
|
||||
ReloadInterval time.Duration
|
||||
}
|
||||
|
||||
type CertReloader struct {
|
||||
cert *tls.Certificate
|
||||
mu sync.RWMutex
|
||||
nextReload time.Time
|
||||
Config
|
||||
}
|
||||
|
||||
func NewCertReloader(config Config) (*CertReloader, error) {
|
||||
reloader := CertReloader{
|
||||
Config: config,
|
||||
}
|
||||
|
||||
reloader.mu.Lock()
|
||||
defer reloader.mu.Unlock()
|
||||
cert, err := reloader.loadCertificate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load initial certificate: %w", err)
|
||||
}
|
||||
reloader.cert = &cert
|
||||
|
||||
return &reloader, nil
|
||||
}
|
||||
|
||||
func (r *CertReloader) GetCertificate() (*tls.Certificate, error) {
|
||||
now := time.Now()
|
||||
// Read locking here before we do the time comparison
|
||||
// If a reload is in progress this will block and we will skip reloading in the current
|
||||
// call once we can continue
|
||||
r.mu.RLock()
|
||||
shouldReload := r.ReloadInterval != 0 && r.nextReload.Before(now)
|
||||
r.mu.RUnlock()
|
||||
if shouldReload {
|
||||
// Need to release the read lock, otherwise we deadlock
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
cert, err := r.loadCertificate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load TLS cert and key: %w", err)
|
||||
}
|
||||
r.cert = &cert
|
||||
r.nextReload = now.Add(r.ReloadInterval)
|
||||
return r.cert, nil
|
||||
}
|
||||
return r.cert, nil
|
||||
}
|
||||
|
||||
func (r *CertReloader) loadCertificate() (tls.Certificate, error) {
|
||||
newCert, err := tls.LoadX509KeyPair(r.CertPath, r.KeyPath)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("failed to load key pair: %w", err)
|
||||
}
|
||||
|
||||
return newCert, nil
|
||||
}
|
|
@ -1,306 +0,0 @@
|
|||
package certreloader
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewCertReloader(t *testing.T) {
|
||||
cert1, key1, cleanup := generateValidCertificateFiles(t)
|
||||
defer cleanup()
|
||||
_, key2, cleanup := generateValidCertificateFiles(t)
|
||||
defer cleanup()
|
||||
|
||||
tcs := []struct {
|
||||
name string
|
||||
config Config
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "no config set",
|
||||
config: Config{},
|
||||
err: fmt.Errorf("failed to load initial certificate: failed to load key pair: open : no such file or directory"),
|
||||
},
|
||||
{
|
||||
name: "invalid certs",
|
||||
config: Config{CertPath: cert1, KeyPath: key2},
|
||||
err: fmt.Errorf("failed to load initial certificate: failed to load key pair: tls: private key does not match public key"),
|
||||
},
|
||||
|
||||
{
|
||||
name: "valid certs",
|
||||
config: Config{CertPath: cert1, KeyPath: key1},
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
reloader, err := NewCertReloader(tc.config)
|
||||
if err != nil {
|
||||
if tc.err == nil {
|
||||
t.Fatalf("NewCertReloader returned error when no error was expected: %s", err)
|
||||
} else if tc.err.Error() != err.Error() {
|
||||
t.Fatalf("expected error did not matched received error. expected: %v, received: %v", tc.err, err)
|
||||
}
|
||||
} else {
|
||||
if reloader == nil {
|
||||
t.Fatal("expected reloader to not be nil")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCertificateReload(t *testing.T) {
|
||||
newCert, newKey, cleanup := generateValidCertificateFiles(t)
|
||||
defer cleanup()
|
||||
|
||||
tcs := []struct {
|
||||
name string
|
||||
waitInterval time.Duration
|
||||
reloadInterval time.Duration
|
||||
newCert string
|
||||
newKey string
|
||||
shouldRotate bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "reloads after interval",
|
||||
waitInterval: time.Microsecond * 200,
|
||||
reloadInterval: time.Microsecond * 100,
|
||||
newCert: newCert,
|
||||
newKey: newKey,
|
||||
shouldRotate: true,
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
name: "doesnt reload before interval",
|
||||
waitInterval: time.Microsecond * 50,
|
||||
reloadInterval: time.Microsecond * 100,
|
||||
newCert: newCert,
|
||||
newKey: newKey,
|
||||
shouldRotate: false,
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cert, key, cleanup := generateValidCertificateFiles(t)
|
||||
defer cleanup()
|
||||
reloader, err := NewCertReloader(Config{
|
||||
CertPath: cert,
|
||||
KeyPath: key,
|
||||
ReloadInterval: tc.reloadInterval,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := copyFile(tc.newCert, cert); err != nil {
|
||||
t.Fatalf("failed to move %s -> %s: %s", newCert, cert, err)
|
||||
}
|
||||
if err := copyFile(tc.newKey, key); err != nil {
|
||||
t.Fatalf("failed to move %s -> %s: %s", newKey, key, err)
|
||||
}
|
||||
time.Sleep(tc.waitInterval)
|
||||
|
||||
actualCert, err := reloader.GetCertificate()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
actualCertParsed, err := x509.ParseCertificate(actualCert.Certificate[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var expectedCert tls.Certificate
|
||||
if tc.shouldRotate {
|
||||
expectedCert, err = tls.LoadX509KeyPair(tc.newCert, tc.newKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
expectedCert, err = tls.LoadX509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
expectedCertParsed, err := x509.ParseCertificate(expectedCert.Certificate[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if expectedCertParsed.DNSNames[0] != actualCertParsed.DNSNames[0] {
|
||||
t.Fatalf("expected certificate was not returned by GetCertificate. expectedCert: %v, actualCert: %v", expectedCertParsed.DNSNames[0], actualCertParsed.DNSNames[0])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func generateValidCertificate(t *testing.T) (*bytes.Buffer, *bytes.Buffer) {
|
||||
t.Helper()
|
||||
|
||||
// set up our CA certificate
|
||||
ca := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(2019),
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Company, INC."},
|
||||
Country: []string{"US"},
|
||||
Province: []string{""},
|
||||
Locality: []string{"San Francisco"},
|
||||
StreetAddress: []string{"Golden Gate Bridge"},
|
||||
PostalCode: []string{"94016"},
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().AddDate(10, 0, 0),
|
||||
IsCA: true,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
// create our private and public key
|
||||
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// create the CA
|
||||
caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// pem encode
|
||||
caPEM := new(bytes.Buffer)
|
||||
err = pem.Encode(caPEM, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: caBytes,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
caPrivKeyPEM := new(bytes.Buffer)
|
||||
err = pem.Encode(caPrivKeyPEM, &pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(caPrivKey),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// set up our server certificate
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(2019),
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Company, INC."},
|
||||
Country: []string{"US"},
|
||||
Province: []string{""},
|
||||
Locality: []string{"San Francisco"},
|
||||
StreetAddress: []string{"Golden Gate Bridge"},
|
||||
PostalCode: []string{"94016"},
|
||||
},
|
||||
DNSNames: []string{randString(8)},
|
||||
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().AddDate(10, 0, 0),
|
||||
SubjectKeyId: []byte{1, 2, 3, 4, 6},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
}
|
||||
|
||||
certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create private key: %s", err)
|
||||
}
|
||||
|
||||
certBytes, err := x509.CreateCertificate(rand.Reader, cert, ca, &certPrivKey.PublicKey, caPrivKey)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create certificate: %s", err)
|
||||
}
|
||||
|
||||
certPEM := new(bytes.Buffer)
|
||||
err = pem.Encode(certPEM, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certBytes,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
certPrivKeyPEM := new(bytes.Buffer)
|
||||
err = pem.Encode(certPrivKeyPEM, &pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return certPEM, certPrivKeyPEM
|
||||
}
|
||||
|
||||
func generateValidCertificateFiles(t *testing.T) (string, string, func()) {
|
||||
t.Helper()
|
||||
certFile, err := os.CreateTemp("", "certreloader_cert")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create certFile: %s", err)
|
||||
}
|
||||
defer certFile.Close()
|
||||
keyFile, err := os.CreateTemp("", "certreloader_key")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create keyFile: %s", err)
|
||||
}
|
||||
defer keyFile.Close()
|
||||
|
||||
certBytes, keyBytes := generateValidCertificate(t)
|
||||
if _, err := io.Copy(certFile, certBytes); err != nil {
|
||||
t.Fatalf("failed to copy certBytes into %s: %s", certFile.Name(), err)
|
||||
}
|
||||
if _, err := io.Copy(keyFile, keyBytes); err != nil {
|
||||
t.Fatalf("failed to copy keyBytes into %s: %s", keyFile.Name(), err)
|
||||
}
|
||||
|
||||
return certFile.Name(), keyFile.Name(), func() {
|
||||
os.Remove(certFile.Name())
|
||||
os.Remove(keyFile.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func copyFile(src, dst string) error {
|
||||
data, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load key pair: %w", err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(dst, data, 0o0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load key pair: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func randString(n int) string {
|
||||
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
bytes := make([]byte, n)
|
||||
//nolint:errcheck
|
||||
rand.Read(bytes)
|
||||
for i, b := range bytes {
|
||||
bytes[i] = alphanum[b%byte(len(alphanum))]
|
||||
}
|
||||
return string(bytes)
|
||||
}
|
|
@ -11,8 +11,6 @@ import (
|
|||
)
|
||||
|
||||
func TestFractionalEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
commonFlags := Flags{
|
||||
|
@ -460,13 +458,8 @@ func TestFractionalEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
@ -493,8 +486,6 @@ func TestFractionalEvaluation(t *testing.T) {
|
|||
}
|
||||
|
||||
func BenchmarkFractionalEvaluation(b *testing.B) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
flags := Flags{
|
||||
|
@ -517,7 +508,7 @@ func BenchmarkFractionalEvaluation(b *testing.B) {
|
|||
},
|
||||
{
|
||||
"fractional": [
|
||||
{"var": "email"},
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
|
@ -551,41 +542,41 @@ func BenchmarkFractionalEvaluation(b *testing.B) {
|
|||
expectedReason string
|
||||
expectedErrorCode string
|
||||
}{
|
||||
"test_a@faas.com": {
|
||||
"test@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_a@faas.com",
|
||||
},
|
||||
expectedVariant: "blue",
|
||||
expectedValue: "#0000FF",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test_b@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_b@faas.com",
|
||||
"email": "test@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test_c@faas.com": {
|
||||
"test2@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_c@faas.com",
|
||||
"email": "test2@faas.com",
|
||||
},
|
||||
expectedVariant: "green",
|
||||
expectedValue: "#00FF00",
|
||||
expectedVariant: "yellow",
|
||||
expectedValue: "#FFFF00",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test_d@faas.com": {
|
||||
"test3@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_d@faas.com",
|
||||
"email": "test3@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test4@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test4@faas.com",
|
||||
},
|
||||
expectedVariant: "blue",
|
||||
expectedValue: "#0000FF",
|
||||
|
@ -596,13 +587,7 @@ func BenchmarkFractionalEvaluation(b *testing.B) {
|
|||
for name, tt := range tests {
|
||||
b.Run(name, func(b *testing.B) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
b.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
|
||||
je := NewJSON(log, &store.Flags{Flags: tt.flags.Flags})
|
||||
for i := 0; i < b.N; i++ {
|
||||
value, variant, reason, _, err := resolve[string](
|
||||
ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
|
|
@ -3,7 +3,6 @@ package evaluator
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
)
|
||||
|
||||
|
@ -12,12 +11,12 @@ type AnyValue struct {
|
|||
Variant string
|
||||
Reason string
|
||||
FlagKey string
|
||||
Metadata model.Metadata
|
||||
Metadata map[string]interface{}
|
||||
Error error
|
||||
}
|
||||
|
||||
func NewAnyValue(
|
||||
value interface{}, variant string, reason string, flagKey string, metadata model.Metadata,
|
||||
value interface{}, variant string, reason string, flagKey string, metadata map[string]interface{},
|
||||
err error,
|
||||
) AnyValue {
|
||||
return AnyValue{
|
||||
|
@ -45,31 +44,31 @@ type IResolver interface {
|
|||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (value bool, variant string, reason string, metadata model.Metadata, err error)
|
||||
context map[string]any) (value bool, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveStringValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value string, variant string, reason string, metadata model.Metadata, err error)
|
||||
value string, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveIntValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value int64, variant string, reason string, metadata model.Metadata, err error)
|
||||
value int64, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveFloatValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value float64, variant string, reason string, metadata model.Metadata, err error)
|
||||
value float64, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveObjectValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value map[string]any, variant string, reason string, metadata model.Metadata, err error)
|
||||
value map[string]any, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveAsAnyValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
|
@ -78,5 +77,5 @@ type IResolver interface {
|
|||
ResolveAllValues(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
context map[string]any) (resolutions []AnyValue, metadata model.Metadata, err error)
|
||||
context map[string]any) (values []AnyValue, err error)
|
||||
}
|
||||
|
|
|
@ -64,13 +64,13 @@ func WithEvaluator(name string, evalFunc func(interface{}, interface{}) interfac
|
|||
|
||||
// JSON evaluator
|
||||
type JSON struct {
|
||||
store *store.Store
|
||||
store *store.Flags
|
||||
Logger *logger.Logger
|
||||
jsonEvalTracer trace.Tracer
|
||||
Resolver
|
||||
}
|
||||
|
||||
func NewJSON(logger *logger.Logger, s *store.Store, opts ...JSONEvaluatorOption) *JSON {
|
||||
func NewJSON(logger *logger.Logger, s *store.Flags, opts ...JSONEvaluatorOption) *JSON {
|
||||
logger = logger.WithFields(
|
||||
zap.String("component", "evaluator"),
|
||||
zap.String("evaluator", "json"),
|
||||
|
@ -103,12 +103,13 @@ func (je *JSON) SetState(payload sync.DataSync) (map[string]interface{}, bool, e
|
|||
_, span := je.jsonEvalTracer.Start(
|
||||
context.Background(),
|
||||
"flagSync",
|
||||
trace.WithAttributes(attribute.String("feature_flag.source", payload.Source)))
|
||||
trace.WithAttributes(attribute.String("feature_flag.source", payload.Source)),
|
||||
trace.WithAttributes(attribute.String("feature_flag.sync_type", payload.Type.String())))
|
||||
defer span.End()
|
||||
|
||||
var definition Definition
|
||||
var newFlags Flags
|
||||
|
||||
err := configToFlagDefinition(je.Logger, payload.FlagData, &definition)
|
||||
err := configToFlags(je.Logger, payload.FlagData, &newFlags)
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, "flagSync error")
|
||||
span.RecordError(err)
|
||||
|
@ -118,7 +119,18 @@ func (je *JSON) SetState(payload sync.DataSync) (map[string]interface{}, bool, e
|
|||
var events map[string]interface{}
|
||||
var reSync bool
|
||||
|
||||
events, reSync = je.store.Update(payload.Source, definition.Flags, definition.Metadata)
|
||||
switch payload.Type {
|
||||
case sync.ALL:
|
||||
events, reSync = je.store.Merge(je.Logger, payload.Source, payload.Selector, newFlags.Flags)
|
||||
case sync.ADD:
|
||||
events = je.store.Add(je.Logger, payload.Source, payload.Selector, newFlags.Flags)
|
||||
case sync.UPDATE:
|
||||
events = je.store.Update(je.Logger, payload.Source, payload.Selector, newFlags.Flags)
|
||||
case sync.DELETE:
|
||||
events = je.store.DeleteFlags(je.Logger, payload.Source, newFlags.Flags)
|
||||
default:
|
||||
return nil, false, fmt.Errorf("unsupported sync type: %d", payload.Type)
|
||||
}
|
||||
|
||||
// Number of events correlates to the number of flags changed through this sync, record it
|
||||
span.SetAttributes(attribute.Int("feature_flag.change_count", len(events)))
|
||||
|
@ -139,24 +151,19 @@ func NewResolver(store store.IStore, logger *logger.Logger, jsonEvalTracer trace
|
|||
jsonlogic.AddOperator(StartsWithEvaluationName, NewStringComparisonEvaluator(logger).StartsWithEvaluation)
|
||||
jsonlogic.AddOperator(EndsWithEvaluationName, NewStringComparisonEvaluator(logger).EndsWithEvaluation)
|
||||
jsonlogic.AddOperator(SemVerEvaluationName, NewSemVerComparison(logger).SemVerEvaluation)
|
||||
jsonlogic.AddOperator(LegacyFractionEvaluationName, NewLegacyFractional(logger).LegacyFractionalEvaluation)
|
||||
|
||||
return Resolver{store: store, Logger: logger, tracer: jsonEvalTracer}
|
||||
}
|
||||
|
||||
func (je *Resolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]AnyValue,
|
||||
model.Metadata, error,
|
||||
) {
|
||||
func (je *Resolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]AnyValue, error) {
|
||||
_, span := je.tracer.Start(ctx, "resolveAll")
|
||||
defer span.End()
|
||||
|
||||
var selector store.Selector
|
||||
s := ctx.Value(store.SelectorContextKey{})
|
||||
if s != nil {
|
||||
selector = s.(store.Selector)
|
||||
}
|
||||
allFlags, flagSetMetadata, err := je.store.GetAll(ctx, &selector)
|
||||
var err error
|
||||
allFlags, err := je.store.GetAll(ctx)
|
||||
if err != nil {
|
||||
return nil, flagSetMetadata, fmt.Errorf("error retreiving flags from the store: %w", err)
|
||||
return nil, fmt.Errorf("error retreiving flags from the store: %w", err)
|
||||
}
|
||||
|
||||
values := []AnyValue{}
|
||||
|
@ -188,7 +195,7 @@ func (je *Resolver) ResolveAllValues(ctx context.Context, reqID string, context
|
|||
values = append(values, NewAnyValue(value, variant, reason, flagKey, metadata, err))
|
||||
}
|
||||
|
||||
return values, flagSetMetadata, nil
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (je *Resolver) ResolveBooleanValue(
|
||||
|
@ -305,24 +312,19 @@ func resolve[T constraints](ctx context.Context, reqID string, key string, conte
|
|||
func (je *Resolver) evaluateVariant(ctx context.Context, reqID string, flagKey string, evalCtx map[string]any) (
|
||||
variant string, variants map[string]interface{}, reason string, metadata map[string]interface{}, err error,
|
||||
) {
|
||||
metadata = map[string]interface{}{}
|
||||
|
||||
var selector store.Selector
|
||||
s := ctx.Value(store.SelectorContextKey{})
|
||||
if s != nil {
|
||||
selector = s.(store.Selector)
|
||||
}
|
||||
flag, metadata, err := je.store.Get(ctx, flagKey, &selector)
|
||||
if err != nil {
|
||||
flag, ok := je.store.Get(ctx, flagKey)
|
||||
if !ok {
|
||||
// flag not found
|
||||
je.Logger.DebugWithID(reqID, fmt.Sprintf("requested flag could not be found: %s", flagKey))
|
||||
return "", map[string]interface{}{}, model.ErrorReason, metadata, errors.New(model.FlagNotFoundErrorCode)
|
||||
}
|
||||
|
||||
for key, value := range flag.Metadata {
|
||||
// If value is not nil or empty, copy to metadata
|
||||
if value != nil {
|
||||
metadata[key] = value
|
||||
}
|
||||
// add selector to evaluation metadata
|
||||
selector := je.store.SelectorForFlag(ctx, flag)
|
||||
if selector != "" {
|
||||
metadata[SelectorMetadataKey] = selector
|
||||
}
|
||||
|
||||
if flag.State == Disabled {
|
||||
|
@ -362,12 +364,7 @@ func (je *Resolver) evaluateVariant(ctx context.Context, reqID string, flagKey s
|
|||
|
||||
// check if string is "null" before we strip quotes, so we can differentiate between JSON null and "null"
|
||||
trimmed := strings.TrimSpace(result.String())
|
||||
|
||||
if trimmed == "null" {
|
||||
if flag.DefaultVariant == "" {
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.FlagNotFoundErrorCode)
|
||||
}
|
||||
|
||||
return flag.DefaultVariant, flag.Variants, model.DefaultReason, metadata, nil
|
||||
}
|
||||
|
||||
|
@ -380,13 +377,8 @@ func (je *Resolver) evaluateVariant(ctx context.Context, reqID string, flagKey s
|
|||
}
|
||||
je.Logger.ErrorWithID(reqID,
|
||||
fmt.Sprintf("invalid or missing variant: %s for flagKey: %s, variant is not valid", variant, flagKey))
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.GeneralErrorCode)
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.ParseErrorCode)
|
||||
}
|
||||
|
||||
if flag.DefaultVariant == "" {
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.FlagNotFoundErrorCode)
|
||||
}
|
||||
|
||||
return flag.DefaultVariant, flag.Variants, model.StaticReason, metadata, nil
|
||||
}
|
||||
|
||||
|
@ -448,8 +440,8 @@ func loadAndCompileSchema(log *logger.Logger) *gojsonschema.Schema {
|
|||
return compiledSchema
|
||||
}
|
||||
|
||||
// configToFlagDefinition convert string configurations to flags and store them to pointer newFlags
|
||||
func configToFlagDefinition(log *logger.Logger, config string, definition *Definition) error {
|
||||
// configToFlags convert string configurations to flags and store them to pointer newFlags
|
||||
func configToFlags(log *logger.Logger, config string, newFlags *Flags) error {
|
||||
compiledSchema := loadAndCompileSchema(log)
|
||||
|
||||
flagStringLoader := gojsonschema.NewStringLoader(config)
|
||||
|
@ -468,22 +460,17 @@ func configToFlagDefinition(log *logger.Logger, config string, definition *Defin
|
|||
return fmt.Errorf("transposing evaluators: %w", err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(transposedConfig), &definition)
|
||||
err = json.Unmarshal([]byte(transposedConfig), &newFlags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshalling provided configurations: %w", err)
|
||||
}
|
||||
|
||||
return validateDefaultVariants(definition)
|
||||
return validateDefaultVariants(newFlags)
|
||||
}
|
||||
|
||||
// validateDefaultVariants returns an error if any of the default variants aren't valid
|
||||
func validateDefaultVariants(flags *Definition) error {
|
||||
func validateDefaultVariants(flags *Flags) error {
|
||||
for name, flag := range flags.Flags {
|
||||
// Default Variant is not provided in the config
|
||||
if flag.DefaultVariant == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := flag.Variants[flag.DefaultVariant]; !ok {
|
||||
return fmt.Errorf(
|
||||
"default variant: '%s' isn't a valid variant of flag: '%s'", flag.DefaultVariant, name,
|
||||
|
|
|
@ -10,11 +10,6 @@ type Evaluators struct {
|
|||
Evaluators map[string]json.RawMessage `json:"$evaluators"`
|
||||
}
|
||||
|
||||
type Definition struct {
|
||||
Flags map[string]model.Flag `json:"flags"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
type Flags struct {
|
||||
Flags map[string]model.Flag `json:"flags"`
|
||||
}
|
||||
|
|
|
@ -44,90 +44,7 @@ const ValidFlags = `{
|
|||
}
|
||||
}`
|
||||
|
||||
const NullDefault = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": null
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const UndefinedDefault = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const NullDefaultWithTargetting = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": null,
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
"==": [
|
||||
{
|
||||
"var": [
|
||||
"key"
|
||||
]
|
||||
},
|
||||
"value"
|
||||
]
|
||||
},
|
||||
"on"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const UndefinedDefaultWithTargetting = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
"==": [
|
||||
{
|
||||
"var": [
|
||||
"key"
|
||||
]
|
||||
},
|
||||
"value"
|
||||
]
|
||||
},
|
||||
"on"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const (
|
||||
FlagSetID = "testSetId"
|
||||
Version = "v33"
|
||||
ValidFlag = "validFlag"
|
||||
MissingFlag = "missingFlag"
|
||||
StaticBoolFlag = "staticBoolFlag"
|
||||
StaticBoolValue = true
|
||||
|
@ -152,15 +69,9 @@ const (
|
|||
ColorProp = "color"
|
||||
ColorValue = "yellow"
|
||||
DisabledFlag = "disabledFlag"
|
||||
MetadataFlag = "metadataFlag"
|
||||
VersionOverride = "v66"
|
||||
)
|
||||
|
||||
var Flags = fmt.Sprintf(`{
|
||||
"metadata": {
|
||||
"flagSetId": "%s",
|
||||
"version": "%s"
|
||||
},
|
||||
"flags": {
|
||||
"%s": {
|
||||
"state": "ENABLED",
|
||||
|
@ -331,22 +242,9 @@ var Flags = fmt.Sprintf(`{
|
|||
"off": false
|
||||
},
|
||||
"defaultVariant": "on"
|
||||
},
|
||||
"%s": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": "on",
|
||||
"metadata": {
|
||||
"version": "%s"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`,
|
||||
FlagSetID,
|
||||
Version,
|
||||
StaticBoolFlag,
|
||||
StaticBoolValue,
|
||||
StaticStringFlag,
|
||||
|
@ -377,13 +275,11 @@ var Flags = fmt.Sprintf(`{
|
|||
DynamicObjectValue,
|
||||
ColorProp,
|
||||
ColorValue,
|
||||
DisabledFlag,
|
||||
MetadataFlag,
|
||||
VersionOverride)
|
||||
DisabledFlag)
|
||||
|
||||
func TestGetState_Valid_ContainsFlag(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error")
|
||||
}
|
||||
|
@ -405,9 +301,9 @@ func TestSetState_Invalid_Error(t *testing.T) {
|
|||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
// set state with an invalid flag definition
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: InvalidFlags, Source: "testSource"})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error")
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: InvalidFlags})
|
||||
if err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -415,7 +311,7 @@ func TestSetState_Valid_NoError(t *testing.T) {
|
|||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
// set state with a valid flag definition
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -423,7 +319,7 @@ func TestSetState_Valid_NoError(t *testing.T) {
|
|||
|
||||
func TestResolveAllValues(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -439,7 +335,7 @@ func TestResolveAllValues(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
for _, test := range tests {
|
||||
vals, _, err := evaluator.ResolveAllValues(context.TODO(), reqID, test.context)
|
||||
vals, err := evaluator.ResolveAllValues(context.TODO(), reqID, test.context)
|
||||
if err != nil {
|
||||
t.Error("error from resolver", err)
|
||||
}
|
||||
|
@ -492,7 +388,7 @@ func TestResolveBooleanValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -527,7 +423,7 @@ func BenchmarkResolveBooleanValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -567,7 +463,7 @@ func TestResolveStringValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -603,7 +499,7 @@ func BenchmarkResolveStringValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -643,7 +539,7 @@ func TestResolveFloatValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -679,7 +575,7 @@ func BenchmarkResolveFloatValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -719,7 +615,7 @@ func TestResolveIntValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -755,7 +651,7 @@ func BenchmarkResolveIntValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -795,7 +691,7 @@ func TestResolveObjectValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -834,7 +730,7 @@ func BenchmarkResolveObjectValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -879,7 +775,7 @@ func TestResolveAsAnyValue(t *testing.T) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -898,37 +794,6 @@ func TestResolveAsAnyValue(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestResolve_DefaultVariant(t *testing.T) {
|
||||
tests := []struct {
|
||||
flags string
|
||||
flagKey string
|
||||
context map[string]interface{}
|
||||
reason string
|
||||
errorCode string
|
||||
}{
|
||||
{NullDefault, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
{UndefinedDefault, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
{NullDefaultWithTargetting, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
{UndefinedDefaultWithTargetting, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: test.flags, Source: "testSource"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
||||
anyResult := evaluator.ResolveAsAnyValue(context.TODO(), "", test.flagKey, test.context)
|
||||
|
||||
assert.Equal(t, model.ErrorReason, anyResult.Reason)
|
||||
assert.EqualError(t, anyResult.Error, test.errorCode)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetState_DefaultVariantValidation(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
jsonFlags string
|
||||
|
@ -982,7 +847,7 @@ func TestSetState_DefaultVariantValidation(t *testing.T) {
|
|||
t.Run(name, func(t *testing.T) {
|
||||
jsonEvaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.jsonFlags, Source: "testSource"})
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.jsonFlags})
|
||||
|
||||
if tt.valid && err != nil {
|
||||
t.Error(err)
|
||||
|
@ -994,6 +859,7 @@ func TestSetState_DefaultVariantValidation(t *testing.T) {
|
|||
func TestState_Evaluator(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
inputState string
|
||||
inputSyncType sync.Type
|
||||
expectedOutputState string
|
||||
expectedError bool
|
||||
expectedResync bool
|
||||
|
@ -1029,6 +895,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedOutputState: `
|
||||
{
|
||||
"flags": {
|
||||
|
@ -1041,7 +908,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1089,6 +956,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedOutputState: `
|
||||
{
|
||||
"flags": {
|
||||
|
@ -1101,7 +969,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1145,6 +1013,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedError: true,
|
||||
},
|
||||
"invalid targeting": {
|
||||
|
@ -1177,7 +1046,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
"off": false
|
||||
},
|
||||
"defaultVariant": "off",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
|
@ -1198,6 +1067,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
"flagSources":null
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedError: false,
|
||||
expectedOutputState: `
|
||||
{
|
||||
|
@ -1211,7 +1081,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1230,7 +1100,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
"off": false
|
||||
},
|
||||
"defaultVariant": "off",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1280,15 +1150,47 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedError: true,
|
||||
},
|
||||
"unexpected sync type": {
|
||||
inputState: `
|
||||
{
|
||||
"flags": {
|
||||
"fibAlgo": {
|
||||
"variants": {
|
||||
"recursive": "recursive",
|
||||
"memo": "memo",
|
||||
"loop": "loop",
|
||||
"binet": "binet"
|
||||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
"$ref": "emailWithFaas"
|
||||
}, "binet", null
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"$evaluators": {
|
||||
"emailWithFaas": ""
|
||||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: 999,
|
||||
expectedError: true,
|
||||
expectedResync: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
jsonEvaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, resync, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.inputState, Source: "testSource"})
|
||||
_, resync, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.inputState})
|
||||
if err != nil {
|
||||
if !tt.expectedError {
|
||||
t.Error(err)
|
||||
|
@ -1321,8 +1223,8 @@ func TestState_Evaluator(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedOutputJSON["flags"], gotOutputJSON) {
|
||||
t.Errorf("expected state: %v got state: %v", expectedOutputJSON["flags"], gotOutputJSON)
|
||||
if !reflect.DeepEqual(expectedOutputJSON["flags"], gotOutputJSON["flags"]) {
|
||||
t.Errorf("expected state: %v got state: %v", expectedOutputJSON, gotOutputJSON)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -1330,11 +1232,13 @@ func TestState_Evaluator(t *testing.T) {
|
|||
|
||||
func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
dataSyncType sync.Type
|
||||
flagResolution func(evaluator *evaluator.JSON) error
|
||||
}{
|
||||
"Add_ResolveAllValues": {
|
||||
dataSyncType: sync.ADD,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
_, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1342,8 +1246,9 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"Update_ResolveAllValues": {
|
||||
dataSyncType: sync.UPDATE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
_, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1351,8 +1256,9 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"Delete_ResolveAllValues": {
|
||||
dataSyncType: sync.DELETE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
_, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1360,30 +1266,35 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"Add_ResolveBooleanValue": {
|
||||
dataSyncType: sync.ADD,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveBooleanValue(context.TODO(), "", StaticBoolFlag, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Update_ResolveStringValue": {
|
||||
dataSyncType: sync.UPDATE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveBooleanValue(context.TODO(), "", StaticStringValue, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Delete_ResolveIntValue": {
|
||||
dataSyncType: sync.DELETE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveIntValue(context.TODO(), "", StaticIntFlag, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Add_ResolveFloatValue": {
|
||||
dataSyncType: sync.ADD,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveFloatValue(context.TODO(), "", StaticFloatFlag, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Update_ResolveObjectValue": {
|
||||
dataSyncType: sync.UPDATE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveObjectValue(context.TODO(), "", StaticObjectFlag, nil)
|
||||
return err
|
||||
|
@ -1395,7 +1306,7 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
t.Run(name, func(t *testing.T) {
|
||||
jsonEvaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Type: sync.ADD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -1418,7 +1329,7 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
errChan <- nil
|
||||
return
|
||||
default:
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Type: tt.dataSyncType})
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
|
@ -1460,7 +1371,7 @@ func TestFlagdAmbientProperties(t *testing.T) {
|
|||
t.Run("flagKeyIsInTheContext", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"welcome-banner": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1500,7 +1411,7 @@ func TestFlagdAmbientProperties(t *testing.T) {
|
|||
t.Run("timestampIsInTheContext", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"welcome-banner": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1534,7 +1445,7 @@ func TestTargetingVariantBehavior(t *testing.T) {
|
|||
t.Run("missing variant error", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"missing-variant": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1562,7 +1473,7 @@ func TestTargetingVariantBehavior(t *testing.T) {
|
|||
t.Run("null fallback", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"null-fallback": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1595,7 +1506,7 @@ func TestTargetingVariantBehavior(t *testing.T) {
|
|||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
//nolint:dupword
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"match-boolean": {
|
||||
"state": "ENABLED",
|
||||
|
|
|
@ -0,0 +1,145 @@
|
|||
// This evaluation type is deprecated and will be removed before v1.
|
||||
// Do not enhance it or use it for reference.
|
||||
|
||||
package evaluator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/zeebo/xxh3"
|
||||
)
|
||||
|
||||
const (
|
||||
LegacyFractionEvaluationName = "fractionalEvaluation"
|
||||
LegacyFractionEvaluationLink = "https://flagd.dev/concepts/#migrating-from-legacy-fractionalevaluation"
|
||||
)
|
||||
|
||||
// Deprecated: LegacyFractional is deprecated. This will be removed prior to v1 release.
|
||||
type LegacyFractional struct {
|
||||
Logger *logger.Logger
|
||||
}
|
||||
|
||||
type legacyFractionalEvaluationDistribution struct {
|
||||
variant string
|
||||
percentage int
|
||||
}
|
||||
|
||||
func NewLegacyFractional(logger *logger.Logger) *LegacyFractional {
|
||||
return &LegacyFractional{Logger: logger}
|
||||
}
|
||||
|
||||
func (fe *LegacyFractional) LegacyFractionalEvaluation(values, data interface{}) interface{} {
|
||||
fe.Logger.Warn(
|
||||
fmt.Sprintf("%s is deprecated, please use %s, see: %s",
|
||||
LegacyFractionEvaluationName,
|
||||
FractionEvaluationName,
|
||||
LegacyFractionEvaluationLink))
|
||||
|
||||
valueToDistribute, feDistributions, err := parseLegacyFractionalEvaluationData(values, data)
|
||||
if err != nil {
|
||||
fe.Logger.Error(fmt.Sprintf("parse fractional evaluation data: %v", err))
|
||||
return nil
|
||||
}
|
||||
|
||||
return distributeLegacyValue(valueToDistribute, feDistributions)
|
||||
}
|
||||
|
||||
func parseLegacyFractionalEvaluationData(values, data interface{}) (string,
|
||||
[]legacyFractionalEvaluationDistribution, error,
|
||||
) {
|
||||
valuesArray, ok := values.([]interface{})
|
||||
if !ok {
|
||||
return "", nil, errors.New("fractional evaluation data is not an array")
|
||||
}
|
||||
if len(valuesArray) < 2 {
|
||||
return "", nil, errors.New("fractional evaluation data has length under 2")
|
||||
}
|
||||
|
||||
bucketBy, ok := valuesArray[0].(string)
|
||||
if !ok {
|
||||
return "", nil, errors.New("first element of fractional evaluation data isn't of type string")
|
||||
}
|
||||
|
||||
dataMap, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
return "", nil, errors.New("data isn't of type map[string]interface{}")
|
||||
}
|
||||
|
||||
v, ok := dataMap[bucketBy]
|
||||
if !ok {
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
valueToDistribute, ok := v.(string)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("var: %s isn't of type string", bucketBy)
|
||||
}
|
||||
|
||||
feDistributions, err := parseLegacyFractionalEvaluationDistributions(valuesArray)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
return valueToDistribute, feDistributions, nil
|
||||
}
|
||||
|
||||
func parseLegacyFractionalEvaluationDistributions(values []interface{}) (
|
||||
[]legacyFractionalEvaluationDistribution, error,
|
||||
) {
|
||||
sumOfPercentages := 0
|
||||
var feDistributions []legacyFractionalEvaluationDistribution
|
||||
for i := 1; i < len(values); i++ {
|
||||
distributionArray, ok := values[i].([]interface{})
|
||||
if !ok {
|
||||
return nil, errors.New("distribution elements aren't of type []interface{}")
|
||||
}
|
||||
|
||||
if len(distributionArray) != 2 {
|
||||
return nil, errors.New("distribution element isn't length 2")
|
||||
}
|
||||
|
||||
variant, ok := distributionArray[0].(string)
|
||||
if !ok {
|
||||
return nil, errors.New("first element of distribution element isn't string")
|
||||
}
|
||||
|
||||
percentage, ok := distributionArray[1].(float64)
|
||||
if !ok {
|
||||
return nil, errors.New("second element of distribution element isn't float")
|
||||
}
|
||||
|
||||
sumOfPercentages += int(percentage)
|
||||
|
||||
feDistributions = append(feDistributions, legacyFractionalEvaluationDistribution{
|
||||
variant: variant,
|
||||
percentage: int(percentage),
|
||||
})
|
||||
}
|
||||
|
||||
if sumOfPercentages != 100 {
|
||||
return nil, fmt.Errorf("percentages must sum to 100, got: %d", sumOfPercentages)
|
||||
}
|
||||
|
||||
return feDistributions, nil
|
||||
}
|
||||
|
||||
func distributeLegacyValue(value string, feDistribution []legacyFractionalEvaluationDistribution) string {
|
||||
hashValue := xxh3.HashString(value)
|
||||
|
||||
hashRatio := float64(hashValue) / math.Pow(2, 64) // divide the hash value by the largest possible value, integer 2^64
|
||||
|
||||
bucket := int(hashRatio * 100) // integer in range [0, 99]
|
||||
|
||||
rangeEnd := 0
|
||||
for _, dist := range feDistribution {
|
||||
rangeEnd += dist.percentage
|
||||
if bucket < rangeEnd {
|
||||
return dist.variant
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
|
@ -0,0 +1,300 @@
|
|||
package evaluator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/open-feature/flagd/core/pkg/store"
|
||||
)
|
||||
|
||||
func TestLegacyFractionalEvaluation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
flags := Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"in": ["@faas.com", {
|
||||
"var": ["email"]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
],
|
||||
[
|
||||
"green",
|
||||
25
|
||||
],
|
||||
[
|
||||
"yellow",
|
||||
25
|
||||
]
|
||||
]
|
||||
}, null
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := map[string]struct {
|
||||
flags Flags
|
||||
flagKey string
|
||||
context map[string]any
|
||||
expectedValue string
|
||||
expectedVariant string
|
||||
expectedReason string
|
||||
expectedErrorCode string
|
||||
}{
|
||||
"test@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test2@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test2@faas.com",
|
||||
},
|
||||
expectedVariant: "yellow",
|
||||
expectedValue: "#FFFF00",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test3@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test3@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test4@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test4@faas.com",
|
||||
},
|
||||
expectedVariant: "blue",
|
||||
expectedValue: "#0000FF",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"non even split": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"in": ["@faas.com", {
|
||||
"var": ["email"]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
50
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
],
|
||||
[
|
||||
"green",
|
||||
25
|
||||
]
|
||||
]
|
||||
}, null
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test4@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"fallback to default variant if no email provided": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
],
|
||||
[
|
||||
"green",
|
||||
25
|
||||
],
|
||||
[
|
||||
"yellow",
|
||||
25
|
||||
]
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{},
|
||||
expectedVariant: "",
|
||||
expectedValue: "",
|
||||
expectedReason: model.ErrorReason,
|
||||
expectedErrorCode: model.ParseErrorCode,
|
||||
},
|
||||
"fallback to default variant if invalid variant as result of fractional evaluation": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"black",
|
||||
100
|
||||
]
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "foo@foo.com",
|
||||
},
|
||||
expectedVariant: "",
|
||||
expectedValue: "",
|
||||
expectedReason: model.ErrorReason,
|
||||
expectedErrorCode: model.ParseErrorCode,
|
||||
},
|
||||
"fallback to default variant if percentages don't sum to 100": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
]
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "foo@foo.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.DefaultReason,
|
||||
},
|
||||
}
|
||||
const reqID = "default"
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
if value != tt.expectedValue {
|
||||
t.Errorf("expected value '%s', got '%s'", tt.expectedValue, value)
|
||||
}
|
||||
|
||||
if variant != tt.expectedVariant {
|
||||
t.Errorf("expected variant '%s', got '%s'", tt.expectedVariant, variant)
|
||||
}
|
||||
|
||||
if reason != tt.expectedReason {
|
||||
t.Errorf("expected reason '%s', got '%s'", tt.expectedReason, reason)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errorCode := err.Error()
|
||||
if errorCode != tt.expectedErrorCode {
|
||||
t.Errorf("expected err '%v', got '%v'", tt.expectedErrorCode, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -14,7 +14,6 @@ import (
|
|||
reflect "reflect"
|
||||
|
||||
evaluator "github.com/open-feature/flagd/core/pkg/evaluator"
|
||||
model "github.com/open-feature/flagd/core/pkg/model"
|
||||
sync "github.com/open-feature/flagd/core/pkg/sync"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
@ -23,7 +22,6 @@ import (
|
|||
type MockIEvaluator struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockIEvaluatorMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockIEvaluatorMockRecorder is the mock recorder for MockIEvaluator.
|
||||
|
@ -59,13 +57,12 @@ func (mr *MockIEvaluatorMockRecorder) GetState() *gomock.Call {
|
|||
}
|
||||
|
||||
// ResolveAllValues mocks base method.
|
||||
func (m *MockIEvaluator) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveAllValues", ctx, reqID, context)
|
||||
ret0, _ := ret[0].([]evaluator.AnyValue)
|
||||
ret1, _ := ret[1].(model.Metadata)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ResolveAllValues indicates an expected call of ResolveAllValues.
|
||||
|
@ -89,13 +86,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveAsAnyValue(ctx, reqID, flagKey, con
|
|||
}
|
||||
|
||||
// ResolveBooleanValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveBooleanValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -107,13 +104,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveBooleanValue(ctx, reqID, flagKey, c
|
|||
}
|
||||
|
||||
// ResolveFloatValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveFloatValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(float64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -125,13 +122,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveFloatValue(ctx, reqID, flagKey, con
|
|||
}
|
||||
|
||||
// ResolveIntValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveIntValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -143,13 +140,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveIntValue(ctx, reqID, flagKey, conte
|
|||
}
|
||||
|
||||
// ResolveObjectValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveObjectValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(map[string]any)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -161,13 +158,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveObjectValue(ctx, reqID, flagKey, co
|
|||
}
|
||||
|
||||
// ResolveStringValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveStringValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -179,10 +176,10 @@ func (mr *MockIEvaluatorMockRecorder) ResolveStringValue(ctx, reqID, flagKey, co
|
|||
}
|
||||
|
||||
// SetState mocks base method.
|
||||
func (m *MockIEvaluator) SetState(payload sync.DataSync) (model.Metadata, bool, error) {
|
||||
func (m *MockIEvaluator) SetState(payload sync.DataSync) (map[string]any, bool, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SetState", payload)
|
||||
ret0, _ := ret[0].(model.Metadata)
|
||||
ret0, _ := ret[0].(map[string]any)
|
||||
ret1, _ := ret[1].(bool)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
|
@ -198,7 +195,6 @@ func (mr *MockIEvaluatorMockRecorder) SetState(payload any) *gomock.Call {
|
|||
type MockIResolver struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockIResolverMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockIResolverMockRecorder is the mock recorder for MockIResolver.
|
||||
|
@ -219,13 +215,12 @@ func (m *MockIResolver) EXPECT() *MockIResolverMockRecorder {
|
|||
}
|
||||
|
||||
// ResolveAllValues mocks base method.
|
||||
func (m *MockIResolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveAllValues", ctx, reqID, context)
|
||||
ret0, _ := ret[0].([]evaluator.AnyValue)
|
||||
ret1, _ := ret[1].(model.Metadata)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ResolveAllValues indicates an expected call of ResolveAllValues.
|
||||
|
@ -249,13 +244,13 @@ func (mr *MockIResolverMockRecorder) ResolveAsAnyValue(ctx, reqID, flagKey, cont
|
|||
}
|
||||
|
||||
// ResolveBooleanValue mocks base method.
|
||||
func (m *MockIResolver) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveBooleanValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -267,13 +262,13 @@ func (mr *MockIResolverMockRecorder) ResolveBooleanValue(ctx, reqID, flagKey, co
|
|||
}
|
||||
|
||||
// ResolveFloatValue mocks base method.
|
||||
func (m *MockIResolver) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveFloatValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(float64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -285,13 +280,13 @@ func (mr *MockIResolverMockRecorder) ResolveFloatValue(ctx, reqID, flagKey, cont
|
|||
}
|
||||
|
||||
// ResolveIntValue mocks base method.
|
||||
func (m *MockIResolver) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveIntValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -303,13 +298,13 @@ func (mr *MockIResolverMockRecorder) ResolveIntValue(ctx, reqID, flagKey, contex
|
|||
}
|
||||
|
||||
// ResolveObjectValue mocks base method.
|
||||
func (m *MockIResolver) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveObjectValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(map[string]any)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -321,13 +316,13 @@ func (mr *MockIResolverMockRecorder) ResolveObjectValue(ctx, reqID, flagKey, con
|
|||
}
|
||||
|
||||
// ResolveStringValue mocks base method.
|
||||
func (m *MockIResolver) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveStringValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
|
|
@ -102,7 +102,7 @@ func parseSemverEvaluationData(values interface{}) (string, string, SemVerOperat
|
|||
}
|
||||
|
||||
if len(parsed) != 3 {
|
||||
return "", "", "", errors.New("sem_ver evaluation must contain a value, an operator, and a comparison target")
|
||||
return "", "", "", errors.New("sem_ver evaluation must contain a value, an operator and a comparison target")
|
||||
}
|
||||
|
||||
actualVersion, err := parseSemanticVersion(parsed[0])
|
||||
|
@ -122,17 +122,11 @@ func parseSemverEvaluationData(values interface{}) (string, string, SemVerOperat
|
|||
return actualVersion, targetVersion, operator, nil
|
||||
}
|
||||
|
||||
func ensureString(v interface{}) string {
|
||||
if str, ok := v.(string); ok {
|
||||
// It's already a string
|
||||
return str
|
||||
}
|
||||
// Convert to string if not already
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
|
||||
func parseSemanticVersion(v interface{}) (string, error) {
|
||||
version := ensureString(v)
|
||||
version, ok := v.(string)
|
||||
if !ok {
|
||||
return "", errors.New("sem_ver evaluation: property did not resolve to a string value")
|
||||
}
|
||||
// version strings are only valid in the semver package if they start with a 'v'
|
||||
// if it's not present in the given value, we prepend it
|
||||
if !strings.HasPrefix(version, "v") {
|
||||
|
@ -140,7 +134,7 @@ func parseSemanticVersion(v interface{}) (string, error) {
|
|||
}
|
||||
|
||||
if !semver.IsValid(version) {
|
||||
return "", fmt.Errorf("'%v' is not a valid semantic version string", version)
|
||||
return "", errors.New("not a valid semantic version string")
|
||||
}
|
||||
|
||||
return version, nil
|
||||
|
@ -149,7 +143,7 @@ func parseSemanticVersion(v interface{}) (string, error) {
|
|||
func parseOperator(o interface{}) (SemVerOperator, error) {
|
||||
operatorString, ok := o.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not parse operator '%v'", o)
|
||||
return "", errors.New("could not parse operator")
|
||||
}
|
||||
|
||||
return SemVerOperator(operatorString), nil
|
||||
|
|
|
@ -23,76 +23,6 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
want bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "invalid version",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "invalid",
|
||||
v2: "v1.0.0",
|
||||
},
|
||||
want: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "preview version vs non preview version",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "v1.0.0-preview.1.2",
|
||||
v2: "v1.0.0",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "preview version vs preview version",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "v1.0.0-preview.1.3",
|
||||
v2: "v1.0.0-preview.1.2",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v left greater",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "0.0.1",
|
||||
v2: "v0.0.2",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v right greater",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "v0.0.1",
|
||||
v2: "0.0.2",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v right equals",
|
||||
svo: Equals,
|
||||
args: args{
|
||||
v1: "v0.0.1",
|
||||
v2: "0.0.1",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v both",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "0.0.1",
|
||||
v2: "0.0.2",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid operator",
|
||||
svo: "",
|
||||
|
@ -103,16 +33,6 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
want: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "less with large number",
|
||||
svo: Less,
|
||||
args: args{
|
||||
v1: "v1234.0.1",
|
||||
v2: "v1235.0.2",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "less",
|
||||
svo: Less,
|
||||
|
@ -123,16 +43,6 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no minor version",
|
||||
svo: Less,
|
||||
args: args{
|
||||
v1: "v1.0",
|
||||
v2: "v1.2",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "not less",
|
||||
svo: Less,
|
||||
|
@ -296,28 +206,19 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var operatorInterface interface{} = string(tt.svo)
|
||||
actualVersion, targetVersion, operator, err := parseSemverEvaluationData([]interface{}{tt.args.v1, operatorInterface, tt.args.v2})
|
||||
if err != nil {
|
||||
require.Truef(t, tt.wantErr, "Error parsing semver evaluation data. actualVersion: %s, targetVersion: %s, operator: %s, err: %s", actualVersion, targetVersion, operator, err)
|
||||
return
|
||||
}
|
||||
|
||||
got, err := operator.compare(actualVersion, targetVersion)
|
||||
got, err := tt.svo.compare(tt.args.v1, tt.args.v2)
|
||||
|
||||
if tt.wantErr {
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.Nil(t, err)
|
||||
require.Equalf(t, tt.want, got, "compare(%v, %v) operator: %s", tt.args.v1, tt.args.v2, operator)
|
||||
require.Equalf(t, tt.want, got, "compare(%v, %v)", tt.args.v1, tt.args.v2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEvaluator_semVerEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
tests := map[string]struct {
|
||||
|
@ -484,130 +385,6 @@ func TestJSONEvaluator_semVerEvaluation(t *testing.T) {
|
|||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions given as double - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [1.2, "=", "1.2"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions given as int - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [1, "=", "v1.0.0"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions and minor-version without patch version operator provided - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [1.2, "=", "1.2"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions with prefixed v operator provided - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [{"var": "version"}, "<", "v1.2"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "v1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions and major-version operator provided - no match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
|
@ -924,12 +701,8 @@ func TestJSONEvaluator_semVerEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
|
|
@ -13,8 +13,6 @@ import (
|
|||
)
|
||||
|
||||
func TestJSONEvaluator_startsWithEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
tests := map[string]struct {
|
||||
|
@ -187,12 +185,8 @@ func TestJSONEvaluator_startsWithEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
@ -216,8 +210,6 @@ func TestJSONEvaluator_startsWithEvaluation(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJSONEvaluator_endsWithEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
tests := map[string]struct {
|
||||
|
@ -390,12 +382,9 @@ func TestJSONEvaluator_endsWithEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
|
|
@ -2,26 +2,15 @@ package model
|
|||
|
||||
import "encoding/json"
|
||||
|
||||
const Key = "Key"
|
||||
const FlagSetId = "FlagSetId"
|
||||
const Source = "Source"
|
||||
const Priority = "Priority"
|
||||
|
||||
type Flag struct {
|
||||
Key string `json:"-"` // not serialized, used only for indexing
|
||||
FlagSetId string `json:"-"` // not serialized, used only for indexing
|
||||
Priority int `json:"-"` // not serialized, used only for indexing
|
||||
State string `json:"state"`
|
||||
DefaultVariant string `json:"defaultVariant"`
|
||||
Variants map[string]any `json:"variants"`
|
||||
Targeting json.RawMessage `json:"targeting,omitempty"`
|
||||
Source string `json:"source"`
|
||||
Selector string `json:"selector"`
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type Evaluators struct {
|
||||
Evaluators map[string]json.RawMessage `json:"$evaluators"`
|
||||
}
|
||||
|
||||
type Metadata = map[string]interface{}
|
||||
|
|
|
@ -1,52 +0,0 @@
|
|||
package notifications
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
const typeField = "type"
|
||||
|
||||
// Use to represent change notifications for mode PROVIDER_CONFIGURATION_CHANGE events.
|
||||
type Notifications map[string]any
|
||||
|
||||
// Generate notifications (deltas) from old and new flag sets for use in RPC mode PROVIDER_CONFIGURATION_CHANGE events.
|
||||
func NewFromFlags(oldFlags, newFlags map[string]model.Flag) Notifications {
|
||||
notifications := map[string]interface{}{}
|
||||
|
||||
// flags removed
|
||||
for key := range oldFlags {
|
||||
if _, ok := newFlags[key]; !ok {
|
||||
notifications[key] = map[string]interface{}{
|
||||
typeField: string(model.NotificationDelete),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// flags added or modified
|
||||
for key, newFlag := range newFlags {
|
||||
oldFlag, exists := oldFlags[key]
|
||||
if !exists {
|
||||
notifications[key] = map[string]interface{}{
|
||||
typeField: string(model.NotificationCreate),
|
||||
}
|
||||
} else if !flagsEqual(oldFlag, newFlag) {
|
||||
notifications[key] = map[string]interface{}{
|
||||
typeField: string(model.NotificationUpdate),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
func flagsEqual(a, b model.Flag) bool {
|
||||
return a.State == b.State &&
|
||||
a.DefaultVariant == b.DefaultVariant &&
|
||||
reflect.DeepEqual(a.Variants, b.Variants) &&
|
||||
reflect.DeepEqual(a.Targeting, b.Targeting) &&
|
||||
a.Source == b.Source &&
|
||||
a.Selector == b.Selector &&
|
||||
reflect.DeepEqual(a.Metadata, b.Metadata)
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
package notifications
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewFromFlags(t *testing.T) {
|
||||
flagA := model.Flag{
|
||||
Key: "flagA",
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "on",
|
||||
Source: "source1",
|
||||
}
|
||||
flagAUpdated := model.Flag{
|
||||
Key: "flagA",
|
||||
State: "DISABLED",
|
||||
DefaultVariant: "on",
|
||||
Source: "source1",
|
||||
}
|
||||
flagB := model.Flag{
|
||||
Key: "flagB",
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "off",
|
||||
Source: "source1",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
oldFlags map[string]model.Flag
|
||||
newFlags map[string]model.Flag
|
||||
want Notifications
|
||||
}{
|
||||
{
|
||||
name: "flag added",
|
||||
oldFlags: map[string]model.Flag{},
|
||||
newFlags: map[string]model.Flag{"flagA": flagA},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flag deleted",
|
||||
oldFlags: map[string]model.Flag{"flagA": flagA},
|
||||
newFlags: map[string]model.Flag{},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flag changed",
|
||||
oldFlags: map[string]model.Flag{"flagA": flagA},
|
||||
newFlags: map[string]model.Flag{"flagA": flagAUpdated},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flag unchanged",
|
||||
oldFlags: map[string]model.Flag{"flagA": flagA},
|
||||
newFlags: map[string]model.Flag{"flagA": flagA},
|
||||
want: Notifications{},
|
||||
},
|
||||
{
|
||||
name: "mixed changes",
|
||||
oldFlags: map[string]model.Flag{
|
||||
"flagA": flagA,
|
||||
"flagB": flagB,
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"flagA": flagAUpdated, // updated
|
||||
"flagC": flagA, // added
|
||||
},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
},
|
||||
"flagB": map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
},
|
||||
"flagC": map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := NewFromFlags(tt.oldFlags, tt.newFlags)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -2,7 +2,6 @@ package service
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"connectrpc.com/connect"
|
||||
)
|
||||
|
@ -24,18 +23,15 @@ type Notification struct {
|
|||
type ReadinessProbe func() bool
|
||||
|
||||
type Configuration struct {
|
||||
ReadinessProbe ReadinessProbe
|
||||
Port uint16
|
||||
ManagementPort uint16
|
||||
ServiceName string
|
||||
CertPath string
|
||||
KeyPath string
|
||||
SocketPath string
|
||||
CORS []string
|
||||
Options []connect.HandlerOption
|
||||
ContextValues map[string]any
|
||||
HeaderToContextKeyMappings map[string]string
|
||||
StreamDeadline time.Duration
|
||||
ReadinessProbe ReadinessProbe
|
||||
Port uint16
|
||||
ManagementPort uint16
|
||||
ServiceName string
|
||||
CertPath string
|
||||
KeyPath string
|
||||
SocketPath string
|
||||
CORS []string
|
||||
Options []connect.HandlerOption
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -12,39 +12,36 @@ type Request struct {
|
|||
}
|
||||
|
||||
type EvaluationSuccess struct {
|
||||
Value interface{} `json:"value"`
|
||||
Key string `json:"key"`
|
||||
Reason string `json:"reason"`
|
||||
Variant string `json:"variant"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
Value interface{} `json:"value"`
|
||||
Key string `json:"key"`
|
||||
Reason string `json:"reason"`
|
||||
Variant string `json:"variant"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
type BulkEvaluationResponse struct {
|
||||
Flags []interface{} `json:"flags"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
Flags []interface{} `json:"flags"`
|
||||
}
|
||||
|
||||
type EvaluationError struct {
|
||||
Key string `json:"key"`
|
||||
ErrorCode string `json:"errorCode"`
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
Key string `json:"key"`
|
||||
ErrorCode string `json:"errorCode"`
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
}
|
||||
|
||||
type BulkEvaluationError struct {
|
||||
ErrorCode string `json:"errorCode"`
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
ErrorCode string `json:"errorCode"`
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
}
|
||||
|
||||
type InternalError struct {
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
}
|
||||
|
||||
func BulkEvaluationResponseFrom(resolutions []evaluator.AnyValue, metadata model.Metadata) BulkEvaluationResponse {
|
||||
func BulkEvaluationResponseFrom(values []evaluator.AnyValue) BulkEvaluationResponse {
|
||||
evaluations := make([]interface{}, 0)
|
||||
|
||||
for _, value := range resolutions {
|
||||
for _, value := range values {
|
||||
if value.Error != nil {
|
||||
_, evaluationError := EvaluationErrorResponseFrom(value)
|
||||
evaluations = append(evaluations, evaluationError)
|
||||
|
@ -55,7 +52,6 @@ func BulkEvaluationResponseFrom(resolutions []evaluator.AnyValue, metadata model
|
|||
|
||||
return BulkEvaluationResponse{
|
||||
evaluations,
|
||||
metadata,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -93,8 +89,7 @@ func BulkEvaluationContextErrorFrom(code string, details string) BulkEvaluationE
|
|||
|
||||
func EvaluationErrorResponseFrom(result evaluator.AnyValue) (int, EvaluationError) {
|
||||
payload := EvaluationError{
|
||||
Key: result.FlagKey,
|
||||
Metadata: result.Metadata,
|
||||
Key: result.FlagKey,
|
||||
}
|
||||
|
||||
status := 400
|
||||
|
|
|
@ -55,7 +55,7 @@ func TestBulkEvaluationResponse(t *testing.T) {
|
|||
{
|
||||
name: "empty input",
|
||||
input: nil,
|
||||
marshalledOutput: "{\"flags\":[],\"metadata\":{}}",
|
||||
marshalledOutput: "{\"flags\":[]}",
|
||||
},
|
||||
{
|
||||
name: "valid values",
|
||||
|
@ -70,21 +70,20 @@ func TestBulkEvaluationResponse(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
Value: false,
|
||||
Variant: "false",
|
||||
Reason: model.ErrorReason,
|
||||
FlagKey: "errorFlag",
|
||||
Error: errors.New(model.FlagNotFoundErrorCode),
|
||||
Metadata: map[string]interface{}{},
|
||||
Value: false,
|
||||
Variant: "false",
|
||||
Reason: model.ErrorReason,
|
||||
FlagKey: "errorFlag",
|
||||
Error: errors.New(model.FlagNotFoundErrorCode),
|
||||
},
|
||||
},
|
||||
marshalledOutput: "{\"flags\":[{\"value\":false,\"key\":\"key\",\"reason\":\"STATIC\",\"variant\":\"false\",\"metadata\":{\"key\":\"value\"}},{\"key\":\"errorFlag\",\"errorCode\":\"FLAG_NOT_FOUND\",\"errorDetails\":\"flag `errorFlag` does not exist\",\"metadata\":{}}],\"metadata\":{}}",
|
||||
marshalledOutput: "{\"flags\":[{\"value\":false,\"key\":\"key\",\"reason\":\"STATIC\",\"variant\":\"false\",\"metadata\":{\"key\":\"value\"}},{\"key\":\"errorFlag\",\"errorCode\":\"FLAG_NOT_FOUND\",\"errorDetails\":\"flag `errorFlag` does not exist\"}]}",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
response := BulkEvaluationResponseFrom(test.input, model.Metadata{})
|
||||
response := BulkEvaluationResponseFrom(test.input)
|
||||
|
||||
marshal, err := json.Marshal(response)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,304 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
type IStore interface {
|
||||
GetAll(ctx context.Context) (map[string]model.Flag, error)
|
||||
Get(ctx context.Context, key string) (model.Flag, bool)
|
||||
SelectorForFlag(ctx context.Context, flag model.Flag) string
|
||||
}
|
||||
|
||||
type Flags struct {
|
||||
mx sync.RWMutex
|
||||
Flags map[string]model.Flag `json:"flags"`
|
||||
FlagSources []string
|
||||
SourceMetadata map[string]SourceDetails
|
||||
}
|
||||
|
||||
type SourceDetails struct {
|
||||
Source string
|
||||
Selector string
|
||||
}
|
||||
|
||||
func (f *Flags) hasPriority(stored string, new string) bool {
|
||||
if stored == new {
|
||||
return true
|
||||
}
|
||||
for i := len(f.FlagSources) - 1; i >= 0; i-- {
|
||||
switch f.FlagSources[i] {
|
||||
case stored:
|
||||
return false
|
||||
case new:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func NewFlags() *Flags {
|
||||
return &Flags{
|
||||
Flags: map[string]model.Flag{},
|
||||
SourceMetadata: map[string]SourceDetails{},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Flags) Set(key string, flag model.Flag) {
|
||||
f.mx.Lock()
|
||||
defer f.mx.Unlock()
|
||||
f.Flags[key] = flag
|
||||
}
|
||||
|
||||
func (f *Flags) Get(_ context.Context, key string) (model.Flag, bool) {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
flag, ok := f.Flags[key]
|
||||
|
||||
return flag, ok
|
||||
}
|
||||
|
||||
func (f *Flags) SelectorForFlag(_ context.Context, flag model.Flag) string {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
|
||||
return f.SourceMetadata[flag.Source].Selector
|
||||
}
|
||||
|
||||
func (f *Flags) Delete(key string) {
|
||||
f.mx.Lock()
|
||||
defer f.mx.Unlock()
|
||||
delete(f.Flags, key)
|
||||
}
|
||||
|
||||
func (f *Flags) String() (string, error) {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
bytes, err := json.Marshal(f)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to marshal flags: %w", err)
|
||||
}
|
||||
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// GetAll returns a copy of the store's state (copy in order to be concurrency safe)
|
||||
func (f *Flags) GetAll(_ context.Context) (map[string]model.Flag, error) {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
state := make(map[string]model.Flag, len(f.Flags))
|
||||
|
||||
for key, flag := range f.Flags {
|
||||
state[key] = flag
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// Add new flags from source.
|
||||
func (f *Flags) Add(logger *logger.Logger, source string, selector string, flags map[string]model.Flag,
|
||||
) map[string]interface{} {
|
||||
notifications := map[string]interface{}{}
|
||||
|
||||
for k, newFlag := range flags {
|
||||
storedFlag, ok := f.Get(context.Background(), k)
|
||||
if ok && !f.hasPriority(storedFlag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not overwriting: flag %s from source %s does not have priority over %s",
|
||||
k,
|
||||
source,
|
||||
storedFlag.Source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
"source": source,
|
||||
}
|
||||
|
||||
// Store the new version of the flag
|
||||
newFlag.Source = source
|
||||
newFlag.Selector = selector
|
||||
f.Set(k, newFlag)
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
// Update existing flags from source.
|
||||
func (f *Flags) Update(logger *logger.Logger, source string, selector string, flags map[string]model.Flag,
|
||||
) map[string]interface{} {
|
||||
notifications := map[string]interface{}{}
|
||||
|
||||
for k, flag := range flags {
|
||||
storedFlag, ok := f.Get(context.Background(), k)
|
||||
if !ok {
|
||||
logger.Warn(
|
||||
fmt.Sprintf("failed to update the flag, flag with key %s from source %s does not exist.",
|
||||
k,
|
||||
source))
|
||||
|
||||
continue
|
||||
}
|
||||
if !f.hasPriority(storedFlag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not updating: flag %s from source %s does not have priority over %s",
|
||||
k,
|
||||
source,
|
||||
storedFlag.Source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
"source": source,
|
||||
}
|
||||
|
||||
flag.Source = source
|
||||
flag.Selector = selector
|
||||
f.Set(k, flag)
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
// DeleteFlags matching flags from source.
|
||||
func (f *Flags) DeleteFlags(logger *logger.Logger, source string, flags map[string]model.Flag) map[string]interface{} {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"store resync triggered: delete event from source %s",
|
||||
source,
|
||||
),
|
||||
)
|
||||
ctx := context.Background()
|
||||
|
||||
notifications := map[string]interface{}{}
|
||||
if len(flags) == 0 {
|
||||
allFlags, err := f.GetAll(ctx)
|
||||
if err != nil {
|
||||
logger.Error(fmt.Sprintf("error while retrieving flags from the store: %v", err))
|
||||
return notifications
|
||||
}
|
||||
|
||||
for key, flag := range allFlags {
|
||||
if flag.Source != source {
|
||||
continue
|
||||
}
|
||||
notifications[key] = map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
"source": source,
|
||||
}
|
||||
f.Delete(key)
|
||||
}
|
||||
}
|
||||
|
||||
for k := range flags {
|
||||
flag, ok := f.Get(ctx, k)
|
||||
if ok {
|
||||
if !f.hasPriority(flag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not deleting: flag %s from source %s cannot be deleted by %s",
|
||||
k,
|
||||
flag.Source,
|
||||
source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
"source": source,
|
||||
}
|
||||
|
||||
f.Delete(k)
|
||||
} else {
|
||||
logger.Warn(
|
||||
fmt.Sprintf("failed to remove flag, flag with key %s from source %s does not exist.",
|
||||
k,
|
||||
source))
|
||||
}
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
// Merge provided flags from source with currently stored flags.
|
||||
// nolint: funlen
|
||||
func (f *Flags) Merge(
|
||||
logger *logger.Logger,
|
||||
source string,
|
||||
selector string,
|
||||
flags map[string]model.Flag,
|
||||
) (map[string]interface{}, bool) {
|
||||
notifications := map[string]interface{}{}
|
||||
resyncRequired := false
|
||||
f.mx.Lock()
|
||||
for k, v := range f.Flags {
|
||||
if v.Source == source && v.Selector == selector {
|
||||
if _, ok := flags[k]; !ok {
|
||||
// flag has been deleted
|
||||
delete(f.Flags, k)
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
"source": source,
|
||||
}
|
||||
resyncRequired = true
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"store resync triggered: flag %s has been deleted from source %s",
|
||||
k, source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
f.mx.Unlock()
|
||||
for k, newFlag := range flags {
|
||||
newFlag.Source = source
|
||||
newFlag.Selector = selector
|
||||
storedFlag, ok := f.Get(context.Background(), k)
|
||||
if ok {
|
||||
if !f.hasPriority(storedFlag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not merging: flag %s from source %s does not have priority over %s",
|
||||
k, source, storedFlag.Source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if reflect.DeepEqual(storedFlag, newFlag) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
"source": source,
|
||||
}
|
||||
} else {
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
"source": source,
|
||||
}
|
||||
}
|
||||
// Store the new version of the flag
|
||||
f.Set(k, newFlag)
|
||||
}
|
||||
return notifications, resyncRequired
|
||||
}
|
|
@ -0,0 +1,545 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHasPriority(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
currentState *Flags
|
||||
storedSource string
|
||||
newSource string
|
||||
hasPriority bool
|
||||
}{
|
||||
{
|
||||
name: "same source",
|
||||
currentState: &Flags{},
|
||||
storedSource: "A",
|
||||
newSource: "A",
|
||||
hasPriority: true,
|
||||
},
|
||||
{
|
||||
name: "no priority",
|
||||
currentState: &Flags{
|
||||
FlagSources: []string{
|
||||
"B",
|
||||
"A",
|
||||
},
|
||||
},
|
||||
storedSource: "A",
|
||||
newSource: "B",
|
||||
hasPriority: false,
|
||||
},
|
||||
{
|
||||
name: "priority",
|
||||
currentState: &Flags{
|
||||
FlagSources: []string{
|
||||
"A",
|
||||
"B",
|
||||
},
|
||||
},
|
||||
storedSource: "A",
|
||||
newSource: "B",
|
||||
hasPriority: true,
|
||||
},
|
||||
{
|
||||
name: "not in sources",
|
||||
currentState: &Flags{
|
||||
FlagSources: []string{
|
||||
"A",
|
||||
"B",
|
||||
},
|
||||
},
|
||||
storedSource: "C",
|
||||
newSource: "D",
|
||||
hasPriority: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
p := tt.currentState.hasPriority(tt.storedSource, tt.newSource)
|
||||
require.Equal(t, p, tt.hasPriority)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeFlags(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
current *Flags
|
||||
new map[string]model.Flag
|
||||
newSource string
|
||||
newSelector string
|
||||
want *Flags
|
||||
wantNotifs map[string]interface{}
|
||||
wantResync bool
|
||||
}{
|
||||
{
|
||||
name: "both nil",
|
||||
current: &Flags{Flags: nil},
|
||||
new: nil,
|
||||
want: &Flags{Flags: nil},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "both empty flags",
|
||||
current: &Flags{Flags: map[string]model.Flag{}},
|
||||
new: map[string]model.Flag{},
|
||||
want: &Flags{Flags: map[string]model.Flag{}},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "empty new",
|
||||
current: &Flags{Flags: map[string]model.Flag{}},
|
||||
new: nil,
|
||||
want: &Flags{Flags: map[string]model.Flag{}},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "merging with new source",
|
||||
current: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"waka": {
|
||||
DefaultVariant: "off",
|
||||
Source: "1",
|
||||
},
|
||||
},
|
||||
},
|
||||
new: map[string]model.Flag{
|
||||
"paka": {
|
||||
DefaultVariant: "on",
|
||||
},
|
||||
},
|
||||
newSource: "2",
|
||||
want: &Flags{Flags: map[string]model.Flag{
|
||||
"waka": {
|
||||
DefaultVariant: "off",
|
||||
Source: "1",
|
||||
},
|
||||
"paka": {
|
||||
DefaultVariant: "on",
|
||||
Source: "2",
|
||||
},
|
||||
}},
|
||||
wantNotifs: map[string]interface{}{"paka": map[string]interface{}{"type": "write", "source": "2"}},
|
||||
},
|
||||
{
|
||||
name: "override by new update",
|
||||
current: &Flags{Flags: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "off"},
|
||||
"paka": {DefaultVariant: "off"},
|
||||
}},
|
||||
new: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "on"},
|
||||
"paka": {DefaultVariant: "on"},
|
||||
},
|
||||
want: &Flags{Flags: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "on"},
|
||||
"paka": {DefaultVariant: "on"},
|
||||
}},
|
||||
wantNotifs: map[string]interface{}{
|
||||
"waka": map[string]interface{}{"type": "update", "source": ""},
|
||||
"paka": map[string]interface{}{"type": "update", "source": ""},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "identical update so empty notifications",
|
||||
current: &Flags{
|
||||
Flags: map[string]model.Flag{"hello": {DefaultVariant: "off"}},
|
||||
},
|
||||
new: map[string]model.Flag{
|
||||
"hello": {DefaultVariant: "off"},
|
||||
},
|
||||
want: &Flags{Flags: map[string]model.Flag{
|
||||
"hello": {DefaultVariant: "off"},
|
||||
}},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "deleted flag & trigger resync for same source",
|
||||
current: &Flags{Flags: map[string]model.Flag{"hello": {DefaultVariant: "off", Source: "A"}}},
|
||||
new: map[string]model.Flag{},
|
||||
newSource: "A",
|
||||
want: &Flags{Flags: map[string]model.Flag{}},
|
||||
wantNotifs: map[string]interface{}{"hello": map[string]interface{}{"type": "delete", "source": "A"}},
|
||||
wantResync: true,
|
||||
},
|
||||
{
|
||||
name: "no deleted & no resync for same source but different selector",
|
||||
current: &Flags{Flags: map[string]model.Flag{"hello": {DefaultVariant: "off", Source: "A", Selector: "X"}}},
|
||||
new: map[string]model.Flag{},
|
||||
newSource: "A",
|
||||
newSelector: "Y",
|
||||
want: &Flags{Flags: map[string]model.Flag{"hello": {DefaultVariant: "off", Source: "A", Selector: "X"}}},
|
||||
wantResync: false,
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "no merge due to low priority",
|
||||
current: &Flags{
|
||||
FlagSources: []string{
|
||||
"B",
|
||||
"A",
|
||||
},
|
||||
Flags: map[string]model.Flag{
|
||||
"hello": {
|
||||
DefaultVariant: "off",
|
||||
Source: "A",
|
||||
},
|
||||
},
|
||||
},
|
||||
new: map[string]model.Flag{"hello": {DefaultVariant: "off"}},
|
||||
newSource: "B",
|
||||
want: &Flags{
|
||||
FlagSources: []string{
|
||||
"B",
|
||||
"A",
|
||||
},
|
||||
Flags: map[string]model.Flag{
|
||||
"hello": {
|
||||
DefaultVariant: "off",
|
||||
Source: "A",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
gotNotifs, resyncRequired := tt.current.Merge(logger.NewLogger(nil, false), tt.newSource, tt.newSelector, tt.new)
|
||||
|
||||
require.True(t, reflect.DeepEqual(tt.want, tt.current))
|
||||
require.Equal(t, tt.wantNotifs, gotNotifs)
|
||||
require.Equal(t, tt.wantResync, resyncRequired)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlags_Add(t *testing.T) {
|
||||
mockLogger := logger.NewLogger(nil, false)
|
||||
mockSource := "source"
|
||||
mockOverrideSource := "source-2"
|
||||
|
||||
type request struct {
|
||||
source string
|
||||
selector string
|
||||
flags map[string]model.Flag
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
storedState *Flags
|
||||
addRequest request
|
||||
expectedState *Flags
|
||||
expectedNotificationKeys []string
|
||||
}{
|
||||
{
|
||||
name: "Add success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
addRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"B"},
|
||||
},
|
||||
{
|
||||
name: "Add multiple success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
addRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"B", "C"},
|
||||
},
|
||||
{
|
||||
name: "Add success - conflict and override",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
addRequest: request{
|
||||
source: mockOverrideSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
messages := tt.storedState.Add(mockLogger, tt.addRequest.source, tt.addRequest.selector, tt.addRequest.flags)
|
||||
|
||||
require.Equal(t, tt.storedState, tt.expectedState)
|
||||
|
||||
for k := range messages {
|
||||
require.Containsf(t, tt.expectedNotificationKeys, k,
|
||||
"Message key %s not present in the expected key list", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlags_Update(t *testing.T) {
|
||||
mockLogger := logger.NewLogger(nil, false)
|
||||
mockSource := "source"
|
||||
mockOverrideSource := "source-2"
|
||||
|
||||
type request struct {
|
||||
source string
|
||||
selector string
|
||||
flags map[string]model.Flag
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
storedState *Flags
|
||||
UpdateRequest request
|
||||
expectedState *Flags
|
||||
expectedNotificationKeys []string
|
||||
}{
|
||||
{
|
||||
name: "Update success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
{
|
||||
name: "Update multiple success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "True"},
|
||||
"B": {Source: mockSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
"B": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
"B": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A", "B"},
|
||||
},
|
||||
{
|
||||
name: "Update success - conflict and override",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockOverrideSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
{
|
||||
name: "Update fail",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
messages := tt.storedState.Update(mockLogger, tt.UpdateRequest.source,
|
||||
tt.UpdateRequest.selector, tt.UpdateRequest.flags)
|
||||
|
||||
require.Equal(t, tt.storedState, tt.expectedState)
|
||||
|
||||
for k := range messages {
|
||||
require.Containsf(t, tt.expectedNotificationKeys, k,
|
||||
"Message key %s not present in the expected key list", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlags_Delete(t *testing.T) {
|
||||
mockLogger := logger.NewLogger(nil, false)
|
||||
mockSource := "source"
|
||||
mockSource2 := "source2"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
storedState *Flags
|
||||
deleteRequest map[string]model.Flag
|
||||
expectedState *Flags
|
||||
expectedNotificationKeys []string
|
||||
}{
|
||||
{
|
||||
name: "Remove success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
deleteRequest: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
{
|
||||
name: "Nothing to remove",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
deleteRequest: map[string]model.Flag{
|
||||
"C": {Source: mockSource},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{},
|
||||
},
|
||||
{
|
||||
name: "Remove all",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
},
|
||||
deleteRequest: map[string]model.Flag{},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A", "B"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
messages := tt.storedState.DeleteFlags(mockLogger, mockSource, tt.deleteRequest)
|
||||
|
||||
require.Equal(t, tt.storedState, tt.expectedState)
|
||||
|
||||
for k := range messages {
|
||||
require.Containsf(t, tt.expectedNotificationKeys, k,
|
||||
"Message key %s not present in the expected key list", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,133 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
uuid "github.com/google/uuid"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
// flags table and index constants
|
||||
const flagsTable = "flags"
|
||||
|
||||
const idIndex = "id"
|
||||
const keyIndex = "key"
|
||||
const sourceIndex = "source"
|
||||
const priorityIndex = "priority"
|
||||
const flagSetIdIndex = "flagSetId"
|
||||
|
||||
// compound indices; maintain sub-indexes alphabetically; order matters; these must match what's generated in the SelectorMapToQuery func.
|
||||
const flagSetIdSourceCompoundIndex = flagSetIdIndex + "+" + sourceIndex
|
||||
const keySourceCompoundIndex = keyIndex + "+" + sourceIndex
|
||||
const flagSetIdKeySourceCompoundIndex = flagSetIdIndex + "+" + keyIndex + "+" + sourceIndex
|
||||
|
||||
// flagSetId defaults to a UUID generated at startup to make our queries consistent
|
||||
// any flag without a "flagSetId" is assigned this one; it's never exposed externally
|
||||
var nilFlagSetId = uuid.New().String()
|
||||
|
||||
// A selector represents a set of constraints used to query the store.
|
||||
type Selector struct {
|
||||
indexMap map[string]string
|
||||
}
|
||||
|
||||
// NewSelector creates a new Selector from a selector expression string.
|
||||
// For example, to select flags from source "./mySource" and flagSetId "1234", use the expression:
|
||||
// "source=./mySource,flagSetId=1234"
|
||||
func NewSelector(selectorExpression string) Selector {
|
||||
return Selector{
|
||||
indexMap: expressionToMap(selectorExpression),
|
||||
}
|
||||
}
|
||||
|
||||
func expressionToMap(sExp string) map[string]string {
|
||||
selectorMap := make(map[string]string)
|
||||
if sExp == "" {
|
||||
return selectorMap
|
||||
}
|
||||
|
||||
if strings.Index(sExp, "=") == -1 {
|
||||
// if no '=' is found, treat the whole string as as source (backwards compatibility)
|
||||
// we may may support interpreting this as a flagSetId in the future as an option
|
||||
selectorMap[sourceIndex] = sExp
|
||||
return selectorMap
|
||||
}
|
||||
|
||||
// Split the selector by commas
|
||||
pairs := strings.Split(sExp, ",")
|
||||
for _, pair := range pairs {
|
||||
// Split each pair by the first equal sign
|
||||
parts := strings.Split(pair, "=")
|
||||
if len(parts) == 2 {
|
||||
key := parts[0]
|
||||
value := parts[1]
|
||||
selectorMap[key] = value
|
||||
}
|
||||
}
|
||||
return selectorMap
|
||||
}
|
||||
|
||||
func (s Selector) WithIndex(key string, value string) Selector {
|
||||
m := maps.Clone(s.indexMap)
|
||||
m[key] = value
|
||||
return Selector{
|
||||
indexMap: m,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Selector) IsEmpty() bool {
|
||||
return s == nil || len(s.indexMap) == 0
|
||||
}
|
||||
|
||||
// SelectorMapToQuery converts the selector map to an indexId and constraints for querying the store.
|
||||
// For a given index, a specific order and number of constraints are required.
|
||||
// Both the indexId and constraints are generated based on the keys present in the selector's internal map.
|
||||
func (s Selector) ToQuery() (indexId string, constraints []interface{}) {
|
||||
|
||||
if len(s.indexMap) == 2 && s.indexMap[flagSetIdIndex] != "" && s.indexMap[keyIndex] != "" {
|
||||
// special case for flagSetId and key (this is the "id" index)
|
||||
return idIndex, []interface{}{s.indexMap[flagSetIdIndex], s.indexMap[keyIndex]}
|
||||
}
|
||||
|
||||
qs := []string{}
|
||||
keys := make([]string, 0, len(s.indexMap))
|
||||
|
||||
for key := range s.indexMap {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, key := range keys {
|
||||
indexId += key + "+"
|
||||
qs = append(qs, s.indexMap[key])
|
||||
}
|
||||
|
||||
indexId = strings.TrimSuffix(indexId, "+")
|
||||
// Convert []string to []interface{}
|
||||
c := make([]interface{}, 0, len(qs))
|
||||
for _, v := range qs {
|
||||
c = append(c, v)
|
||||
}
|
||||
constraints = c
|
||||
|
||||
return indexId, constraints
|
||||
}
|
||||
|
||||
// SelectorToMetadata converts the selector's internal map to metadata for logging or tracing purposes.
|
||||
// Only includes known indices to avoid leaking sensitive information, and is usually returned as the "top level" metadata
|
||||
func (s *Selector) ToMetadata() model.Metadata {
|
||||
meta := model.Metadata{}
|
||||
|
||||
if s == nil || s.indexMap == nil {
|
||||
return meta
|
||||
}
|
||||
|
||||
if s.indexMap[flagSetIdIndex] != "" {
|
||||
meta[flagSetIdIndex] = s.indexMap[flagSetIdIndex]
|
||||
}
|
||||
if s.indexMap[sourceIndex] != "" {
|
||||
meta[sourceIndex] = s.indexMap[sourceIndex]
|
||||
}
|
||||
return meta
|
||||
}
|
|
@ -1,193 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
func TestSelector_IsEmpty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
wantEmpty bool
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
selector: nil,
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "nil indexMap",
|
||||
selector: &Selector{indexMap: nil},
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "empty indexMap",
|
||||
selector: &Selector{indexMap: map[string]string{}},
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "non-empty indexMap",
|
||||
selector: &Selector{indexMap: map[string]string{"source": "abc"}},
|
||||
wantEmpty: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.selector.IsEmpty()
|
||||
if got != tt.wantEmpty {
|
||||
t.Errorf("IsEmpty() = %v, want %v", got, tt.wantEmpty)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelector_WithIndex(t *testing.T) {
|
||||
oldS := Selector{indexMap: map[string]string{"source": "abc"}}
|
||||
newS := oldS.WithIndex("flagSetId", "1234")
|
||||
|
||||
if newS.indexMap["source"] != "abc" {
|
||||
t.Errorf("WithIndex did not preserve existing keys")
|
||||
}
|
||||
if newS.indexMap["flagSetId"] != "1234" {
|
||||
t.Errorf("WithIndex did not add new key")
|
||||
}
|
||||
// Ensure original is unchanged
|
||||
if _, ok := oldS.indexMap["flagSetId"]; ok {
|
||||
t.Errorf("WithIndex mutated original selector")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelector_ToQuery(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
selector Selector
|
||||
wantIndex string
|
||||
wantConstr []interface{}
|
||||
}{
|
||||
{
|
||||
name: "flagSetId and key primary index special case",
|
||||
selector: Selector{indexMap: map[string]string{"flagSetId": "fsid", "key": "myKey"}},
|
||||
wantIndex: "id",
|
||||
wantConstr: []interface{}{"fsid", "myKey"},
|
||||
},
|
||||
{
|
||||
name: "multiple keys sorted",
|
||||
selector: Selector{indexMap: map[string]string{"source": "src", "flagSetId": "fsid"}},
|
||||
wantIndex: "flagSetId+source",
|
||||
wantConstr: []interface{}{"fsid", "src"},
|
||||
},
|
||||
{
|
||||
name: "single key",
|
||||
selector: Selector{indexMap: map[string]string{"source": "src"}},
|
||||
wantIndex: "source",
|
||||
wantConstr: []interface{}{"src"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotIndex, gotConstr := tt.selector.ToQuery()
|
||||
if gotIndex != tt.wantIndex {
|
||||
t.Errorf("ToQuery() index = %v, want %v", gotIndex, tt.wantIndex)
|
||||
}
|
||||
if !reflect.DeepEqual(gotConstr, tt.wantConstr) {
|
||||
t.Errorf("ToQuery() constraints = %v, want %v", gotConstr, tt.wantConstr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelector_ToMetadata(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
want model.Metadata
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
selector: nil,
|
||||
want: model.Metadata{},
|
||||
},
|
||||
{
|
||||
name: "nil indexMap",
|
||||
selector: &Selector{indexMap: nil},
|
||||
want: model.Metadata{},
|
||||
},
|
||||
{
|
||||
name: "empty indexMap",
|
||||
selector: &Selector{indexMap: map[string]string{}},
|
||||
want: model.Metadata{},
|
||||
},
|
||||
{
|
||||
name: "flagSetId only",
|
||||
selector: &Selector{indexMap: map[string]string{"flagSetId": "fsid"}},
|
||||
want: model.Metadata{"flagSetId": "fsid"},
|
||||
},
|
||||
{
|
||||
name: "source only",
|
||||
selector: &Selector{indexMap: map[string]string{"source": "src"}},
|
||||
want: model.Metadata{"source": "src"},
|
||||
},
|
||||
{
|
||||
name: "flagSetId and source",
|
||||
selector: &Selector{indexMap: map[string]string{"flagSetId": "fsid", "source": "src"}},
|
||||
want: model.Metadata{"flagSetId": "fsid", "source": "src"},
|
||||
},
|
||||
{
|
||||
name: "flagSetId, source, and key (key should be ignored)",
|
||||
selector: &Selector{indexMap: map[string]string{"flagSetId": "fsid", "source": "src", "key": "myKey"}},
|
||||
want: model.Metadata{"flagSetId": "fsid", "source": "src"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.selector.ToMetadata()
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("ToMetadata() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewSelector(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantMap map[string]string
|
||||
}{
|
||||
{
|
||||
name: "source and flagSetId",
|
||||
input: "source=abc,flagSetId=1234",
|
||||
wantMap: map[string]string{"source": "abc", "flagSetId": "1234"},
|
||||
},
|
||||
{
|
||||
name: "source",
|
||||
input: "source=abc",
|
||||
wantMap: map[string]string{"source": "abc"},
|
||||
},
|
||||
{
|
||||
name: "no equals, treat as source",
|
||||
input: "mysource",
|
||||
wantMap: map[string]string{"source": "mysource"},
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
wantMap: map[string]string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := NewSelector(tt.input)
|
||||
if !reflect.DeepEqual(s.indexMap, tt.wantMap) {
|
||||
t.Errorf("NewSelector(%q) indexMap = %v, want %v", tt.input, s.indexMap, tt.wantMap)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,396 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/open-feature/flagd/core/pkg/notifications"
|
||||
)
|
||||
|
||||
var noValidatedSources = []string{}
|
||||
|
||||
type SelectorContextKey struct{}
|
||||
|
||||
type FlagQueryResult struct {
|
||||
Flags map[string]model.Flag
|
||||
}
|
||||
|
||||
type IStore interface {
|
||||
Get(ctx context.Context, key string, selector *Selector) (model.Flag, model.Metadata, error)
|
||||
GetAll(ctx context.Context, selector *Selector) (map[string]model.Flag, model.Metadata, error)
|
||||
Watch(ctx context.Context, selector *Selector, watcher chan<- FlagQueryResult)
|
||||
}
|
||||
|
||||
var _ IStore = (*Store)(nil)
|
||||
|
||||
type Store struct {
|
||||
mx sync.RWMutex
|
||||
db *memdb.MemDB
|
||||
logger *logger.Logger
|
||||
sources []string
|
||||
// deprecated: has no effect and will be removed soon.
|
||||
FlagSources []string
|
||||
}
|
||||
|
||||
type SourceDetails struct {
|
||||
Source string
|
||||
Selector string
|
||||
}
|
||||
|
||||
// NewStore creates a new in-memory store with the given sources.
|
||||
// The order of sources in the slice determines their priority, when queries result in duplicate flags (queries without source or flagSetId), the higher priority source "wins".
|
||||
func NewStore(logger *logger.Logger, sources []string) (*Store, error) {
|
||||
|
||||
// a unique index must exist for each set of constraints - for example, to look up by key and source, we need a compound index on key+source, etc
|
||||
// we maybe want to generate these dynamically in the future to support more robust querying, but for now we will hardcode the ones we need
|
||||
schema := &memdb.DBSchema{
|
||||
Tables: map[string]*memdb.TableSchema{
|
||||
flagsTable: {
|
||||
Name: flagsTable,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
// primary index; must be unique and named "id"
|
||||
idIndex: {
|
||||
Name: idIndex,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
// for looking up by source
|
||||
sourceIndex: {
|
||||
Name: sourceIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
// for looking up by priority, used to maintain highest priority flag when there are duplicates and no selector is provided
|
||||
priorityIndex: {
|
||||
Name: priorityIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.IntFieldIndex{Field: model.Priority},
|
||||
},
|
||||
// for looking up by flagSetId
|
||||
flagSetIdIndex: {
|
||||
Name: flagSetIdIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
},
|
||||
keyIndex: {
|
||||
Name: keyIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
},
|
||||
flagSetIdSourceCompoundIndex: {
|
||||
Name: flagSetIdSourceCompoundIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
keySourceCompoundIndex: {
|
||||
Name: keySourceCompoundIndex,
|
||||
Unique: false, // duplicate from a single source ARE allowed (they just must have different flag sets)
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
// used to query all flags from a specific source so we know which flags to delete if a flag is missing from a source
|
||||
flagSetIdKeySourceCompoundIndex: {
|
||||
Name: flagSetIdKeySourceCompoundIndex,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a new data base
|
||||
db, err := memdb.NewMemDB(schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to initialize flag database: %w", err)
|
||||
}
|
||||
|
||||
// clone the sources to avoid modifying the original slice
|
||||
s := slices.Clone(sources)
|
||||
|
||||
return &Store{
|
||||
sources: s,
|
||||
db: db,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Deprecated: use NewStore instead - will be removed very soon.
|
||||
func NewFlags() *Store {
|
||||
state, err := NewStore(logger.NewLogger(nil, false), noValidatedSources)
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unable to create flag store: %v", err))
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
func (s *Store) Get(_ context.Context, key string, selector *Selector) (model.Flag, model.Metadata, error) {
|
||||
s.logger.Debug(fmt.Sprintf("getting flag %s", key))
|
||||
txn := s.db.Txn(false)
|
||||
queryMeta := selector.ToMetadata()
|
||||
|
||||
// if present, use the selector to query the flags
|
||||
if !selector.IsEmpty() {
|
||||
selector := selector.WithIndex("key", key)
|
||||
indexId, constraints := selector.ToQuery()
|
||||
s.logger.Debug(fmt.Sprintf("getting flag with query: %s, %v", indexId, constraints))
|
||||
raw, err := txn.First(flagsTable, indexId, constraints...)
|
||||
flag, ok := raw.(model.Flag)
|
||||
if err != nil {
|
||||
return model.Flag{}, queryMeta, fmt.Errorf("flag %s not found: %w", key, err)
|
||||
}
|
||||
if !ok {
|
||||
return model.Flag{}, queryMeta, fmt.Errorf("flag %s is not a valid flag", key)
|
||||
}
|
||||
return flag, queryMeta, nil
|
||||
|
||||
}
|
||||
// otherwise, get all flags with the given key, and keep the last one with the highest priority
|
||||
s.logger.Debug(fmt.Sprintf("getting highest priority flag with key: %s", key))
|
||||
it, err := txn.Get(flagsTable, keyIndex, key)
|
||||
if err != nil {
|
||||
return model.Flag{}, queryMeta, fmt.Errorf("flag %s not found: %w", key, err)
|
||||
}
|
||||
flag := model.Flag{}
|
||||
found := false
|
||||
for raw := it.Next(); raw != nil; raw = it.Next() {
|
||||
nextFlag, ok := raw.(model.Flag)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
found = true
|
||||
if nextFlag.Priority >= flag.Priority {
|
||||
flag = nextFlag
|
||||
} else {
|
||||
s.logger.Debug(fmt.Sprintf("discarding flag %s from lower priority source %s in favor of flag from source %s", nextFlag.Key, s.sources[nextFlag.Priority], s.sources[flag.Priority]))
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return flag, queryMeta, fmt.Errorf("flag %s not found", key)
|
||||
}
|
||||
return flag, queryMeta, nil
|
||||
}
|
||||
|
||||
func (f *Store) String() (string, error) {
|
||||
f.logger.Debug("dumping flags to string")
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
|
||||
state, _, err := f.GetAll(context.Background(), nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to get all flags: %w", err)
|
||||
}
|
||||
|
||||
bytes, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to marshal flags: %w", err)
|
||||
}
|
||||
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// GetAll returns a copy of the store's state (copy in order to be concurrency safe)
|
||||
func (s *Store) GetAll(ctx context.Context, selector *Selector) (map[string]model.Flag, model.Metadata, error) {
|
||||
flags := make(map[string]model.Flag)
|
||||
queryMeta := selector.ToMetadata()
|
||||
it, err := s.selectOrAll(selector)
|
||||
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("flag query error: %v", err))
|
||||
return flags, queryMeta, err
|
||||
}
|
||||
flags = s.collect(it)
|
||||
return flags, queryMeta, nil
|
||||
}
|
||||
|
||||
// Update the flag state with the provided flags.
|
||||
func (s *Store) Update(
|
||||
source string,
|
||||
flags map[string]model.Flag,
|
||||
metadata model.Metadata,
|
||||
) (map[string]interface{}, bool) {
|
||||
resyncRequired := false
|
||||
|
||||
if source == "" {
|
||||
panic("source cannot be empty")
|
||||
}
|
||||
|
||||
priority := slices.Index(s.sources, source)
|
||||
if priority == -1 {
|
||||
// this is a hack to allow old constructors that didn't pass sources, remove when we remove "NewFlags" constructor
|
||||
if !slices.Equal(s.sources, noValidatedSources) {
|
||||
panic(fmt.Sprintf("source %s is not registered in the store", source))
|
||||
}
|
||||
// same as above - remove when we remove "NewFlags" constructor
|
||||
priority = 0
|
||||
}
|
||||
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// get all flags for the source we are updating
|
||||
selector := NewSelector(sourceIndex + "=" + source)
|
||||
oldFlags, _, _ := s.GetAll(context.Background(), &selector)
|
||||
|
||||
s.mx.Lock()
|
||||
for key := range oldFlags {
|
||||
if _, ok := flags[key]; !ok {
|
||||
// flag has been deleted
|
||||
s.logger.Debug(fmt.Sprintf("flag %s has been deleted from source %s", key, source))
|
||||
|
||||
count, err := txn.DeleteAll(flagsTable, keySourceCompoundIndex, key, source)
|
||||
s.logger.Debug(fmt.Sprintf("deleted %d flags with key %s from source %s", count, key, source))
|
||||
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("error deleting flag: %s, %v", key, err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
s.mx.Unlock()
|
||||
for key, newFlag := range flags {
|
||||
s.logger.Debug(fmt.Sprintf("got metadata %v", metadata))
|
||||
|
||||
newFlag.Key = key
|
||||
newFlag.Source = source
|
||||
newFlag.Priority = priority
|
||||
newFlag.Metadata = patchMetadata(metadata, newFlag.Metadata)
|
||||
|
||||
// flagSetId defaults to a UUID generated at startup to make our queries isomorphic
|
||||
flagSetId := nilFlagSetId
|
||||
// flagSetId is inherited from the set, but can be overridden by the flag
|
||||
setFlagSetId, ok := newFlag.Metadata["flagSetId"].(string)
|
||||
if ok {
|
||||
flagSetId = setFlagSetId
|
||||
}
|
||||
newFlag.FlagSetId = flagSetId
|
||||
|
||||
raw, err := txn.First(flagsTable, keySourceCompoundIndex, key, source)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("unable to get flag %s from source %s: %v", key, source, err))
|
||||
continue
|
||||
}
|
||||
oldFlag, ok := raw.(model.Flag)
|
||||
// If we already have a flag with the same key and source, we need to check if it has the same flagSetId
|
||||
if ok {
|
||||
if oldFlag.FlagSetId != newFlag.FlagSetId {
|
||||
// If the flagSetId is different, we need to delete the entry, since flagSetId+key represents the primary index, and it's now been changed.
|
||||
// This is important especially for clients listening to flagSetId changes, as they expect the flag to be removed from the set in this case.
|
||||
_, err = txn.DeleteAll(flagsTable, idIndex, oldFlag.FlagSetId, key)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("unable to delete flags with key %s and flagSetId %s: %v", key, oldFlag.FlagSetId, err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
// Store the new version of the flag
|
||||
s.logger.Debug(fmt.Sprintf("storing flag: %v", newFlag))
|
||||
err = txn.Insert(flagsTable, newFlag)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("unable to insert flag %s: %v", key, err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
txn.Commit()
|
||||
return notifications.NewFromFlags(oldFlags, flags), resyncRequired
|
||||
}
|
||||
|
||||
// Watch the result-set of a selector for changes, sending updates to the watcher channel.
|
||||
func (s *Store) Watch(ctx context.Context, selector *Selector, watcher chan<- FlagQueryResult) {
|
||||
go func() {
|
||||
for {
|
||||
ws := memdb.NewWatchSet()
|
||||
it, err := s.selectOrAll(selector)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("error watching flags: %v", err))
|
||||
close(watcher)
|
||||
return
|
||||
}
|
||||
ws.Add(it.WatchCh())
|
||||
|
||||
flags := s.collect(it)
|
||||
watcher <- FlagQueryResult{
|
||||
Flags: flags,
|
||||
}
|
||||
|
||||
if err = ws.WatchCtx(ctx); err != nil {
|
||||
s.logger.Error(fmt.Sprintf("error watching flags: %v", err))
|
||||
close(watcher)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// returns an iterator for the given selector, or all flags if the selector is nil or empty
|
||||
func (s *Store) selectOrAll(selector *Selector) (it memdb.ResultIterator, err error) {
|
||||
txn := s.db.Txn(false)
|
||||
if !selector.IsEmpty() {
|
||||
indexId, constraints := selector.ToQuery()
|
||||
s.logger.Debug(fmt.Sprintf("getting all flags with query: %s, %v", indexId, constraints))
|
||||
return txn.Get(flagsTable, indexId, constraints...)
|
||||
} else {
|
||||
// no selector, get all flags
|
||||
return txn.Get(flagsTable, idIndex)
|
||||
}
|
||||
}
|
||||
|
||||
// collects flags from an iterator, ensuring that only the highest priority flag is kept when there are duplicates
|
||||
func (s *Store) collect(it memdb.ResultIterator) map[string]model.Flag {
|
||||
flags := make(map[string]model.Flag)
|
||||
for raw := it.Next(); raw != nil; raw = it.Next() {
|
||||
flag := raw.(model.Flag)
|
||||
if existing, ok := flags[flag.Key]; ok {
|
||||
if flag.Priority < existing.Priority {
|
||||
s.logger.Debug(fmt.Sprintf("discarding duplicate flag %s from lower priority source %s in favor of flag from source %s", flag.Key, s.sources[flag.Priority], s.sources[existing.Priority]))
|
||||
continue // we already have a higher priority flag
|
||||
}
|
||||
s.logger.Debug(fmt.Sprintf("overwriting duplicate flag %s from lower priority source %s in favor of flag from source %s", flag.Key, s.sources[existing.Priority], s.sources[flag.Priority]))
|
||||
}
|
||||
flags[flag.Key] = flag
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
func patchMetadata(original, patch model.Metadata) model.Metadata {
|
||||
patched := make(model.Metadata)
|
||||
if original == nil && patch == nil {
|
||||
return nil
|
||||
}
|
||||
for key, value := range original {
|
||||
patched[key] = value
|
||||
}
|
||||
for key, value := range patch { // patch values overwrite m1 values on key conflict
|
||||
patched[key] = value
|
||||
}
|
||||
return patched
|
||||
}
|
|
@ -1,487 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUpdateFlags(t *testing.T) {
|
||||
|
||||
const source1 = "source1"
|
||||
const source2 = "source2"
|
||||
var sources = []string{source1, source2}
|
||||
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(t *testing.T) *Store
|
||||
newFlags map[string]model.Flag
|
||||
source string
|
||||
wantFlags map[string]model.Flag
|
||||
setMetadata model.Metadata
|
||||
wantNotifs map[string]interface{}
|
||||
wantResync bool
|
||||
}{
|
||||
{
|
||||
name: "both nil",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
return s
|
||||
},
|
||||
source: source1,
|
||||
newFlags: nil,
|
||||
wantFlags: map[string]model.Flag{},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "both empty flags",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
return s
|
||||
},
|
||||
source: source1,
|
||||
newFlags: map[string]model.Flag{},
|
||||
wantFlags: map[string]model.Flag{},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "empty new",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
return s
|
||||
},
|
||||
source: source1,
|
||||
newFlags: nil,
|
||||
wantFlags: map[string]model.Flag{},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "update from source 1 (old flag removed)",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
s.Update(source1, map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "off"},
|
||||
}, nil)
|
||||
return s
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"paka": {DefaultVariant: "on"},
|
||||
},
|
||||
source: source1,
|
||||
wantFlags: map[string]model.Flag{
|
||||
"paka": {Key: "paka", DefaultVariant: "on", Source: source1, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{
|
||||
"paka": map[string]interface{}{"type": "write"},
|
||||
"waka": map[string]interface{}{"type": "delete"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update from source 1 (new flag added)",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
s.Update(source1, map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "off"},
|
||||
}, nil)
|
||||
return s
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"paka": {DefaultVariant: "on"},
|
||||
},
|
||||
source: source2,
|
||||
wantFlags: map[string]model.Flag{
|
||||
"waka": {Key: "waka", DefaultVariant: "off", Source: source1, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
"paka": {Key: "paka", DefaultVariant: "on", Source: source2, FlagSetId: nilFlagSetId, Priority: 1},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{"paka": map[string]interface{}{"type": "write"}},
|
||||
},
|
||||
{
|
||||
name: "flag set inheritance",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
s.Update(source1, map[string]model.Flag{}, model.Metadata{})
|
||||
return s
|
||||
},
|
||||
setMetadata: model.Metadata{
|
||||
"flagSetId": "topLevelSet", // top level set metadata, including flagSetId
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "on"},
|
||||
"paka": {DefaultVariant: "on", Metadata: model.Metadata{"flagSetId": "flagLevelSet"}}, // overrides set level flagSetId
|
||||
},
|
||||
source: source1,
|
||||
wantFlags: map[string]model.Flag{
|
||||
"waka": {Key: "waka", DefaultVariant: "on", Source: source1, FlagSetId: "topLevelSet", Priority: 0, Metadata: model.Metadata{"flagSetId": "topLevelSet"}},
|
||||
"paka": {Key: "paka", DefaultVariant: "on", Source: source1, FlagSetId: "flagLevelSet", Priority: 0, Metadata: model.Metadata{"flagSetId": "flagLevelSet"}},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{
|
||||
"paka": map[string]interface{}{"type": "write"},
|
||||
"waka": map[string]interface{}{"type": "write"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
store := tt.setup(t)
|
||||
gotNotifs, resyncRequired := store.Update(tt.source, tt.newFlags, tt.setMetadata)
|
||||
gotFlags, _, _ := store.GetAll(context.Background(), nil)
|
||||
|
||||
require.Equal(t, tt.wantFlags, gotFlags)
|
||||
require.Equal(t, tt.wantNotifs, gotNotifs)
|
||||
require.Equal(t, tt.wantResync, resyncRequired)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
sourceB := "sourceB"
|
||||
sourceC := "sourceC"
|
||||
flagSetIdB := "flagSetIdA"
|
||||
flagSetIdC := "flagSetIdC"
|
||||
var sources = []string{sourceA, sourceB, sourceC}
|
||||
|
||||
sourceASelector := NewSelector("source=" + sourceA)
|
||||
flagSetIdCSelector := NewSelector("flagSetId=" + flagSetIdC)
|
||||
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
key string
|
||||
selector *Selector
|
||||
wantFlag model.Flag
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
key: "flagA",
|
||||
selector: nil,
|
||||
wantFlag: model.Flag{Key: "flagA", DefaultVariant: "off", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "flagSetId selector",
|
||||
key: "dupe",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantFlag: model.Flag{Key: "dupe", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "source selector",
|
||||
key: "dupe",
|
||||
selector: &sourceASelector,
|
||||
wantFlag: model.Flag{Key: "dupe", DefaultVariant: "on", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "flag not found with source selector",
|
||||
key: "flagB",
|
||||
selector: &sourceASelector,
|
||||
wantFlag: model.Flag{Key: "flagB", DefaultVariant: "off", Source: sourceB, FlagSetId: flagSetIdB, Priority: 1, Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "flag not found with flagSetId selector",
|
||||
key: "flagB",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantFlag: model.Flag{Key: "flagB", DefaultVariant: "off", Source: sourceB, FlagSetId: flagSetIdB, Priority: 1, Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "on"},
|
||||
}
|
||||
sourceBFlags := map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
}
|
||||
sourceCFlags := map[string]model.Flag{
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
store.Update(sourceA, sourceAFlags, nil)
|
||||
store.Update(sourceB, sourceBFlags, nil)
|
||||
store.Update(sourceC, sourceCFlags, nil)
|
||||
gotFlag, _, err := store.Get(context.Background(), tt.key, tt.selector)
|
||||
|
||||
if !tt.wantErr {
|
||||
require.Equal(t, tt.wantFlag, gotFlag)
|
||||
} else {
|
||||
require.Error(t, err, "expected an error for key %s with selector %v", tt.key, tt.selector)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAllNoWatcher(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
sourceB := "sourceB"
|
||||
sourceC := "sourceC"
|
||||
flagSetIdB := "flagSetIdA"
|
||||
flagSetIdC := "flagSetIdC"
|
||||
sources := []string{sourceA, sourceB, sourceC}
|
||||
|
||||
sourceASelector := NewSelector("source=" + sourceA)
|
||||
flagSetIdCSelector := NewSelector("flagSetId=" + flagSetIdC)
|
||||
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
wantFlags map[string]model.Flag
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
selector: nil,
|
||||
wantFlags: map[string]model.Flag{
|
||||
// "dupe" should be overwritten by higher priority flag
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Source: sourceB, FlagSetId: flagSetIdB, Priority: 1, Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "source selector",
|
||||
selector: &sourceASelector,
|
||||
wantFlags: map[string]model.Flag{
|
||||
// we should get the "dupe" from sourceA
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "on", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flagSetId selector",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantFlags: map[string]model.Flag{
|
||||
// we should get the "dupe" from flagSetIdC
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "on"},
|
||||
}
|
||||
sourceBFlags := map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
}
|
||||
sourceCFlags := map[string]model.Flag{
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
store.Update(sourceA, sourceAFlags, nil)
|
||||
store.Update(sourceB, sourceBFlags, nil)
|
||||
store.Update(sourceC, sourceCFlags, nil)
|
||||
gotFlags, _, _ := store.GetAll(context.Background(), tt.selector)
|
||||
|
||||
require.Equal(t, len(tt.wantFlags), len(gotFlags))
|
||||
require.Equal(t, tt.wantFlags, gotFlags)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatch(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
sourceB := "sourceB"
|
||||
sourceC := "sourceC"
|
||||
myFlagSetId := "myFlagSet"
|
||||
var sources = []string{sourceA, sourceB, sourceC}
|
||||
pauseTime := 100 * time.Millisecond // time for updates to settle
|
||||
timeout := 1000 * time.Millisecond // time to make sure we get enough updates, and no extras
|
||||
|
||||
sourceASelector := NewSelector("source=" + sourceA)
|
||||
flagSetIdCSelector := NewSelector("flagSetId=" + myFlagSetId)
|
||||
emptySelector := NewSelector("")
|
||||
sourceCSelector := NewSelector("source=" + sourceC)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
wantUpdates int
|
||||
}{
|
||||
{
|
||||
name: "flag source selector (initial, plus 1 update)",
|
||||
selector: &sourceASelector,
|
||||
wantUpdates: 2,
|
||||
},
|
||||
{
|
||||
name: "flag set selector (initial, plus 3 updates)",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantUpdates: 4,
|
||||
},
|
||||
{
|
||||
name: "no selector (all updates)",
|
||||
selector: &emptySelector,
|
||||
wantUpdates: 5,
|
||||
},
|
||||
{
|
||||
name: "flag source selector for unchanged source (initial, plus no updates)",
|
||||
selector: &sourceCSelector,
|
||||
wantUpdates: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
}
|
||||
sourceBFlags := map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": myFlagSetId}},
|
||||
}
|
||||
sourceCFlags := map[string]model.Flag{
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off"},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
// setup initial flags
|
||||
store.Update(sourceA, sourceAFlags, model.Metadata{})
|
||||
store.Update(sourceB, sourceBFlags, model.Metadata{})
|
||||
store.Update(sourceC, sourceCFlags, model.Metadata{})
|
||||
watcher := make(chan FlagQueryResult, 1)
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
store.Watch(ctx, tt.selector, watcher)
|
||||
|
||||
// perform updates
|
||||
go func() {
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// changing a flag default variant should trigger an update
|
||||
store.Update(sourceA, map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "on"},
|
||||
}, model.Metadata{})
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// changing a flag default variant should trigger an update
|
||||
store.Update(sourceB, map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on", Metadata: model.Metadata{"flagSetId": myFlagSetId}},
|
||||
}, model.Metadata{})
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// removing a flag set id should trigger an update (even for flag set id selectors; it should remove the flag from the set)
|
||||
store.Update(sourceB, map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on"},
|
||||
}, model.Metadata{})
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// adding a flag set id should trigger an update
|
||||
store.Update(sourceB, map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on", Metadata: model.Metadata{"flagSetId": myFlagSetId}},
|
||||
}, model.Metadata{})
|
||||
}()
|
||||
|
||||
updates := 0
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
assert.Equal(t, tt.wantUpdates, updates, "expected %d updates, got %d", tt.wantUpdates, updates)
|
||||
cancel()
|
||||
_, open := <-watcher
|
||||
assert.False(t, open, "watcher channel should be closed after cancel")
|
||||
return
|
||||
case q := <-watcher:
|
||||
if q.Flags != nil {
|
||||
updates++
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryMetadata(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
otherSource := "otherSource"
|
||||
nonExistingFlagSetId := "nonExistingFlagSetId"
|
||||
var sources = []string{sourceA}
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on"},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
// setup initial flags
|
||||
store.Update(sourceA, sourceAFlags, model.Metadata{})
|
||||
|
||||
selector := NewSelector("source=" + otherSource + ",flagSetId=" + nonExistingFlagSetId)
|
||||
_, metadata, _ := store.GetAll(context.Background(), &selector)
|
||||
assert.Equal(t, metadata, model.Metadata{"source": otherSource, "flagSetId": nonExistingFlagSetId}, "metadata did not match expected")
|
||||
|
||||
selector = NewSelector("source=" + otherSource + ",flagSetId=" + nonExistingFlagSetId)
|
||||
_, metadata, _ = store.Get(context.Background(), "key", &selector)
|
||||
assert.Equal(t, metadata, model.Metadata{"source": otherSource, "flagSetId": nonExistingFlagSetId}, "metadata did not match expected")
|
||||
}
|
|
@ -1,147 +0,0 @@
|
|||
package blob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/utils"
|
||||
"gocloud.dev/blob"
|
||||
_ "gocloud.dev/blob/azureblob" // needed to initialize Azure Blob Storage driver
|
||||
_ "gocloud.dev/blob/gcsblob" // needed to initialize GCS driver
|
||||
_ "gocloud.dev/blob/s3blob" // needed to initialize s3 driver
|
||||
)
|
||||
|
||||
type Sync struct {
|
||||
Bucket string
|
||||
Object string
|
||||
BlobURLMux *blob.URLMux
|
||||
Cron Cron
|
||||
Logger *logger.Logger
|
||||
Interval uint32
|
||||
ready bool
|
||||
lastUpdated time.Time
|
||||
}
|
||||
|
||||
// Cron defines the behaviour required of a cron
|
||||
type Cron interface {
|
||||
AddFunc(spec string, cmd func()) error
|
||||
Start()
|
||||
Stop()
|
||||
}
|
||||
|
||||
func (hs *Sync) Init(_ context.Context) error {
|
||||
if hs.Bucket == "" {
|
||||
return errors.New("no bucket string set")
|
||||
}
|
||||
if hs.Object == "" {
|
||||
return errors.New("no object string set")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hs *Sync) IsReady() bool {
|
||||
return hs.ready
|
||||
}
|
||||
|
||||
func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
hs.Logger.Info(fmt.Sprintf("starting sync from %s/%s with interval %ds", hs.Bucket, hs.Object, hs.Interval))
|
||||
_ = hs.Cron.AddFunc(fmt.Sprintf("*/%d * * * *", hs.Interval), func() {
|
||||
err := hs.sync(ctx, dataSync, false)
|
||||
if err != nil {
|
||||
hs.Logger.Warn(fmt.Sprintf("sync failed: %v", err))
|
||||
}
|
||||
})
|
||||
// Initial fetch
|
||||
hs.Logger.Debug(fmt.Sprintf("initial sync of the %s/%s", hs.Bucket, hs.Object))
|
||||
err := hs.sync(ctx, dataSync, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hs.ready = true
|
||||
hs.Cron.Start()
|
||||
<-ctx.Done()
|
||||
hs.Cron.Stop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hs *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
return hs.sync(ctx, dataSync, true)
|
||||
}
|
||||
|
||||
func (hs *Sync) sync(ctx context.Context, dataSync chan<- sync.DataSync, skipCheckingModTime bool) error {
|
||||
bucket, err := hs.getBucket(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get bucket: %v", err)
|
||||
}
|
||||
defer bucket.Close()
|
||||
var updated time.Time
|
||||
if !skipCheckingModTime {
|
||||
updated, err = hs.fetchObjectModificationTime(ctx, bucket)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get object attributes: %v", err)
|
||||
}
|
||||
if hs.lastUpdated.Equal(updated) {
|
||||
hs.Logger.Debug("configuration hasn't changed, skipping fetching full object")
|
||||
return nil
|
||||
}
|
||||
if hs.lastUpdated.After(updated) {
|
||||
hs.Logger.Warn("configuration changed but the modification time decreased instead of increasing")
|
||||
}
|
||||
}
|
||||
msg, err := hs.fetchObject(ctx, bucket)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get object: %v", err)
|
||||
}
|
||||
hs.Logger.Debug(fmt.Sprintf("configuration updated: %s", msg))
|
||||
if !skipCheckingModTime {
|
||||
hs.lastUpdated = updated
|
||||
}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.Bucket + hs.Object}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hs *Sync) getBucket(ctx context.Context) (*blob.Bucket, error) {
|
||||
b, err := hs.BlobURLMux.OpenBucket(ctx, hs.Bucket)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening bucket %s: %v", hs.Bucket, err)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (hs *Sync) fetchObjectModificationTime(ctx context.Context, bucket *blob.Bucket) (time.Time, error) {
|
||||
if hs.Object == "" {
|
||||
return time.Time{}, errors.New("no object string set")
|
||||
}
|
||||
attrs, err := bucket.Attributes(ctx, hs.Object)
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("error fetching attributes for object %s/%s: %w", hs.Bucket, hs.Object, err)
|
||||
}
|
||||
return attrs.ModTime, nil
|
||||
}
|
||||
|
||||
func (hs *Sync) fetchObject(ctx context.Context, bucket *blob.Bucket) (string, error) {
|
||||
r, err := bucket.NewReader(ctx, hs.Object, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening reader for object %s/%s: %w", hs.Bucket, hs.Object, err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error downloading object %s/%s: %w", hs.Bucket, hs.Object, err)
|
||||
}
|
||||
|
||||
json, err := utils.ConvertToJSON(data, filepath.Ext(hs.Object), r.ContentType())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error converting blob data to json: %w", err)
|
||||
}
|
||||
return json, nil
|
||||
}
|
|
@ -1,152 +0,0 @@
|
|||
package blob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
synctesting "github.com/open-feature/flagd/core/pkg/sync/testing"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestBlobSync(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
scheme string
|
||||
bucket string
|
||||
object string
|
||||
content string
|
||||
convertedContent string
|
||||
}{
|
||||
"json file type": {
|
||||
scheme: "xyz",
|
||||
bucket: "b",
|
||||
object: "flags.json",
|
||||
content: "{\"flags\":{}}",
|
||||
convertedContent: "{\"flags\":{}}",
|
||||
},
|
||||
"yaml file type": {
|
||||
scheme: "xyz",
|
||||
bucket: "b",
|
||||
object: "flags.yaml",
|
||||
content: "flags: []",
|
||||
convertedContent: "{\"flags\":[]}",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(spec string, cmd func()) error {
|
||||
return nil
|
||||
})
|
||||
mockCron.EXPECT().Start().Times(1)
|
||||
|
||||
blobSync := &Sync{
|
||||
Bucket: tt.scheme + "://" + tt.bucket,
|
||||
Object: tt.object,
|
||||
Cron: mockCron,
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
blobMock := NewMockBlob(tt.scheme, func() *Sync {
|
||||
return blobSync
|
||||
})
|
||||
blobSync.BlobURLMux = blobMock.URLMux()
|
||||
|
||||
ctx := context.Background()
|
||||
dataSyncChan := make(chan sync.DataSync, 1)
|
||||
|
||||
blobMock.AddObject(tt.object, tt.content)
|
||||
|
||||
go func() {
|
||||
err := blobSync.Sync(ctx, dataSyncChan)
|
||||
if err != nil {
|
||||
log.Fatalf("Error start sync: %s", err.Error())
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
data := <-dataSyncChan // initial sync
|
||||
if data.FlagData != tt.convertedContent {
|
||||
t.Errorf("expected content: %s, but received content: %s", tt.convertedContent, data.FlagData)
|
||||
}
|
||||
tickWithConfigChange(t, mockCron, dataSyncChan, blobMock, tt.object, tt.convertedContent)
|
||||
tickWithoutConfigChange(t, mockCron, dataSyncChan)
|
||||
tickWithConfigChange(t, mockCron, dataSyncChan, blobMock, tt.object, tt.convertedContent)
|
||||
tickWithoutConfigChange(t, mockCron, dataSyncChan)
|
||||
tickWithoutConfigChange(t, mockCron, dataSyncChan)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func tickWithConfigChange(t *testing.T, mockCron *synctesting.MockCron, dataSyncChan chan sync.DataSync, blobMock *MockBlob, object string, newConfig string) {
|
||||
time.Sleep(1 * time.Millisecond) // sleep so the new file has different modification date
|
||||
blobMock.AddObject(object, newConfig)
|
||||
mockCron.Tick()
|
||||
select {
|
||||
case data, ok := <-dataSyncChan:
|
||||
if ok {
|
||||
if data.FlagData != newConfig {
|
||||
t.Errorf("expected content: %s, but received content: %s", newConfig, data.FlagData)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("data channel unexpectedly closed")
|
||||
}
|
||||
default:
|
||||
t.Errorf("data channel has no expected update")
|
||||
}
|
||||
}
|
||||
|
||||
func tickWithoutConfigChange(t *testing.T, mockCron *synctesting.MockCron, dataSyncChan chan sync.DataSync) {
|
||||
mockCron.Tick()
|
||||
select {
|
||||
case data, ok := <-dataSyncChan:
|
||||
if ok {
|
||||
t.Errorf("unexpected update: %s", data.FlagData)
|
||||
} else {
|
||||
t.Errorf("data channel unexpectedly closed")
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func TestReSync(t *testing.T) {
|
||||
const (
|
||||
scheme = "xyz"
|
||||
bucket = "b"
|
||||
object = "flags.json"
|
||||
)
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
|
||||
blobSync := &Sync{
|
||||
Bucket: scheme + "://" + bucket,
|
||||
Object: object,
|
||||
Cron: mockCron,
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
blobMock := NewMockBlob(scheme, func() *Sync {
|
||||
return blobSync
|
||||
})
|
||||
blobSync.BlobURLMux = blobMock.URLMux()
|
||||
|
||||
ctx := context.Background()
|
||||
dataSyncChan := make(chan sync.DataSync, 1)
|
||||
|
||||
config := "my-config"
|
||||
blobMock.AddObject(object, config)
|
||||
|
||||
err := blobSync.ReSync(ctx, dataSyncChan)
|
||||
if err != nil {
|
||||
log.Fatalf("Error start sync: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
data := <-dataSyncChan
|
||||
if data.FlagData != config {
|
||||
t.Errorf("expected content: %s, but received content: %s", config, data.FlagData)
|
||||
}
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
package blob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/url"
|
||||
|
||||
"gocloud.dev/blob"
|
||||
"gocloud.dev/blob/memblob"
|
||||
)
|
||||
|
||||
type MockBlob struct {
|
||||
mux *blob.URLMux
|
||||
scheme string
|
||||
opener *fakeOpener
|
||||
}
|
||||
|
||||
type fakeOpener struct {
|
||||
object string
|
||||
content string
|
||||
keepModTime bool
|
||||
getSync func() *Sync
|
||||
}
|
||||
|
||||
func (f *fakeOpener) OpenBucketURL(ctx context.Context, _ *url.URL) (*blob.Bucket, error) {
|
||||
bucketURL, err := url.Parse("mem://")
|
||||
if err != nil {
|
||||
log.Fatalf("couldn't parse url: %s: %v", "mem://", err)
|
||||
}
|
||||
opener := &memblob.URLOpener{}
|
||||
bucket, err := opener.OpenBucketURL(ctx, bucketURL)
|
||||
if err != nil {
|
||||
log.Fatalf("couldn't open in memory bucket: %v", err)
|
||||
}
|
||||
if f.object != "" {
|
||||
err = bucket.WriteAll(ctx, f.object, []byte(f.content), nil)
|
||||
if err != nil {
|
||||
log.Fatalf("couldn't write in memory file: %v", err)
|
||||
}
|
||||
}
|
||||
if f.keepModTime && f.object != "" {
|
||||
attrs, err := bucket.Attributes(ctx, f.object)
|
||||
if err != nil {
|
||||
log.Fatalf("couldn't get memory file attributes: %v", err)
|
||||
}
|
||||
f.getSync().lastUpdated = attrs.ModTime
|
||||
} else {
|
||||
f.keepModTime = true
|
||||
}
|
||||
return bucket, nil
|
||||
}
|
||||
|
||||
func NewMockBlob(scheme string, getSync func() *Sync) *MockBlob {
|
||||
mux := new(blob.URLMux)
|
||||
opener := &fakeOpener{getSync: getSync}
|
||||
mux.RegisterBucket(scheme, opener)
|
||||
return &MockBlob{
|
||||
mux: mux,
|
||||
scheme: scheme,
|
||||
opener: opener,
|
||||
}
|
||||
}
|
||||
|
||||
func (mb *MockBlob) URLMux() *blob.URLMux {
|
||||
return mb.mux
|
||||
}
|
||||
|
||||
func (mb *MockBlob) AddObject(object, content string) {
|
||||
mb.opener.object = object
|
||||
mb.opener.content = content
|
||||
mb.opener.keepModTime = false
|
||||
}
|
|
@ -5,11 +5,11 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
msync "sync"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
blobSync "github.com/open-feature/flagd/core/pkg/sync/blob"
|
||||
"github.com/open-feature/flagd/core/pkg/sync/file"
|
||||
"github.com/open-feature/flagd/core/pkg/sync/grpc"
|
||||
"github.com/open-feature/flagd/core/pkg/sync/grpc/credentials"
|
||||
|
@ -17,7 +17,6 @@ import (
|
|||
"github.com/open-feature/flagd/core/pkg/sync/kubernetes"
|
||||
"github.com/robfig/cron"
|
||||
"go.uber.org/zap"
|
||||
"gocloud.dev/blob"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
@ -25,26 +24,17 @@ import (
|
|||
|
||||
const (
|
||||
syncProviderFile = "file"
|
||||
syncProviderFsNotify = "fsnotify"
|
||||
syncProviderFileInfo = "fileinfo"
|
||||
syncProviderGrpc = "grpc"
|
||||
syncProviderKubernetes = "kubernetes"
|
||||
syncProviderHTTP = "http"
|
||||
syncProviderGcs = "gcs"
|
||||
syncProviderAzblob = "azblob"
|
||||
syncProviderS3 = "s3"
|
||||
)
|
||||
|
||||
var (
|
||||
regCrd *regexp.Regexp
|
||||
regURL *regexp.Regexp
|
||||
regGRPC *regexp.Regexp
|
||||
regGRPCSecure *regexp.Regexp
|
||||
regGRPCCustomResolver *regexp.Regexp
|
||||
regFile *regexp.Regexp
|
||||
regGcs *regexp.Regexp
|
||||
regAzblob *regexp.Regexp
|
||||
regS3 *regexp.Regexp
|
||||
regCrd *regexp.Regexp
|
||||
regURL *regexp.Regexp
|
||||
regGRPC *regexp.Regexp
|
||||
regGRPCSecure *regexp.Regexp
|
||||
regFile *regexp.Regexp
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -52,11 +42,7 @@ func init() {
|
|||
regURL = regexp.MustCompile("^https?://")
|
||||
regGRPC = regexp.MustCompile("^" + grpc.Prefix)
|
||||
regGRPCSecure = regexp.MustCompile("^" + grpc.PrefixSecure)
|
||||
regGRPCCustomResolver = regexp.MustCompile("^" + grpc.SupportedScheme)
|
||||
regFile = regexp.MustCompile("^file:")
|
||||
regGcs = regexp.MustCompile("^gs://.+?/")
|
||||
regAzblob = regexp.MustCompile("^azblob://.+?/")
|
||||
regS3 = regexp.MustCompile("^s3://.+?/")
|
||||
}
|
||||
|
||||
type ISyncBuilder interface {
|
||||
|
@ -100,13 +86,8 @@ func (sb *SyncBuilder) SyncsFromConfig(sourceConfigs []sync.SourceConfig, logger
|
|||
func (sb *SyncBuilder) syncFromConfig(sourceConfig sync.SourceConfig, logger *logger.Logger) (sync.ISync, error) {
|
||||
switch sourceConfig.Provider {
|
||||
case syncProviderFile:
|
||||
logger.Debug(fmt.Sprintf("using filepath sync-provider for: %q", sourceConfig.URI))
|
||||
return sb.newFile(sourceConfig.URI, logger), nil
|
||||
case syncProviderFsNotify:
|
||||
logger.Debug(fmt.Sprintf("using fsnotify sync-provider for: %q", sourceConfig.URI))
|
||||
return sb.newFsNotify(sourceConfig.URI, logger), nil
|
||||
case syncProviderFileInfo:
|
||||
logger.Debug(fmt.Sprintf("using fileinfo sync-provider for: %q", sourceConfig.URI))
|
||||
return sb.newFileInfo(sourceConfig.URI, logger), nil
|
||||
case syncProviderKubernetes:
|
||||
logger.Debug(fmt.Sprintf("using kubernetes sync-provider for: %s", sourceConfig.URI))
|
||||
return sb.newK8s(sourceConfig.URI, logger)
|
||||
|
@ -116,60 +97,24 @@ func (sb *SyncBuilder) syncFromConfig(sourceConfig sync.SourceConfig, logger *lo
|
|||
case syncProviderGrpc:
|
||||
logger.Debug(fmt.Sprintf("using grpc sync-provider for: %s", sourceConfig.URI))
|
||||
return sb.newGRPC(sourceConfig, logger), nil
|
||||
case syncProviderGcs:
|
||||
logger.Debug(fmt.Sprintf("using blob sync-provider with gcs driver for: %s", sourceConfig.URI))
|
||||
return sb.newGcs(sourceConfig, logger), nil
|
||||
case syncProviderAzblob:
|
||||
logger.Debug(fmt.Sprintf("using blob sync-provider with azblob driver for: %s", sourceConfig.URI))
|
||||
return sb.newAzblob(sourceConfig, logger)
|
||||
case syncProviderS3:
|
||||
logger.Debug(fmt.Sprintf("using blob sync-provider with s3 driver for: %s", sourceConfig.URI))
|
||||
return sb.newS3(sourceConfig, logger), nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid sync provider: %s, must be one of with "+
|
||||
"'%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s' or '%s'",
|
||||
sourceConfig.Provider, syncProviderFile, syncProviderFsNotify, syncProviderFileInfo,
|
||||
syncProviderKubernetes, syncProviderHTTP, syncProviderGrpc, syncProviderGcs, syncProviderAzblob, syncProviderS3)
|
||||
return nil, fmt.Errorf("invalid sync provider: %s, must be one of with '%s', '%s', '%s' or '%s'",
|
||||
sourceConfig.Provider, syncProviderFile, syncProviderKubernetes, syncProviderHTTP, syncProviderKubernetes)
|
||||
}
|
||||
}
|
||||
|
||||
// newFile returns an fsinfo sync if we are in k8s or fileinfo if not
|
||||
func (sb *SyncBuilder) newFile(uri string, logger *logger.Logger) *file.Sync {
|
||||
switch os.Getenv("KUBERNETES_SERVICE_HOST") {
|
||||
case "":
|
||||
// no k8s service host env; use fileinfo
|
||||
return sb.newFileInfo(uri, logger)
|
||||
default:
|
||||
// default to fsnotify
|
||||
return sb.newFsNotify(uri, logger)
|
||||
return &file.Sync{
|
||||
URI: regFile.ReplaceAllString(uri, ""),
|
||||
Logger: logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", "filepath"),
|
||||
),
|
||||
Mux: &msync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// return a new file.Sync that uses fsnotify under the hood
|
||||
func (sb *SyncBuilder) newFsNotify(uri string, logger *logger.Logger) *file.Sync {
|
||||
return file.NewFileSync(
|
||||
regFile.ReplaceAllString(uri, ""),
|
||||
file.FSNOTIFY,
|
||||
logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", syncProviderFsNotify),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// return a new file.Sync that uses os.Stat/fs.FileInfo under the hood
|
||||
func (sb *SyncBuilder) newFileInfo(uri string, logger *logger.Logger) *file.Sync {
|
||||
return file.NewFileSync(
|
||||
regFile.ReplaceAllString(uri, ""),
|
||||
file.FILEINFO,
|
||||
logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", syncProviderFileInfo),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
func (sb *SyncBuilder) newK8s(uri string, logger *logger.Logger) (*kubernetes.Sync, error) {
|
||||
dynamicClient, err := sb.k8sClientBuilder.GetK8sClient()
|
||||
if err != nil {
|
||||
|
@ -221,100 +166,6 @@ func (sb *SyncBuilder) newGRPC(config sync.SourceConfig, logger *logger.Logger)
|
|||
ProviderID: config.ProviderID,
|
||||
Secure: config.TLS,
|
||||
Selector: config.Selector,
|
||||
MaxMsgSize: config.MaxMsgSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (sb *SyncBuilder) newGcs(config sync.SourceConfig, logger *logger.Logger) *blobSync.Sync {
|
||||
// Extract bucket uri and object name from the full URI:
|
||||
// gs://bucket/path/to/object results in gs://bucket/ as bucketUri and
|
||||
// path/to/object as an object name.
|
||||
bucketURI := regGcs.FindString(config.URI)
|
||||
objectName := regGcs.ReplaceAllString(config.URI, "")
|
||||
|
||||
// Defaults to 5 seconds if interval is not set.
|
||||
var interval uint32 = 5
|
||||
if config.Interval != 0 {
|
||||
interval = config.Interval
|
||||
}
|
||||
|
||||
return &blobSync.Sync{
|
||||
Bucket: bucketURI,
|
||||
Object: objectName,
|
||||
|
||||
BlobURLMux: blob.DefaultURLMux(),
|
||||
|
||||
Logger: logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", "gcs"),
|
||||
),
|
||||
Interval: interval,
|
||||
Cron: cron.New(),
|
||||
}
|
||||
}
|
||||
|
||||
func (sb *SyncBuilder) newAzblob(config sync.SourceConfig, logger *logger.Logger) (*blobSync.Sync, error) {
|
||||
// Required to generate the azblob service URL
|
||||
storageAccountName := os.Getenv("AZURE_STORAGE_ACCOUNT")
|
||||
if storageAccountName == "" {
|
||||
return nil, fmt.Errorf("environment variable AZURE_STORAGE_ACCOUNT not set or is blank")
|
||||
}
|
||||
if regexp.MustCompile(`\s`).MatchString(storageAccountName) {
|
||||
return nil, fmt.Errorf("environment variable AZURE_STORAGE_ACCOUNT contains whitespace")
|
||||
}
|
||||
|
||||
// Extract bucket uri and object name from the full URI:
|
||||
// azblob://bucket/path/to/object results in azblob://bucket/ as bucketUri and
|
||||
// path/to/object as an object name.
|
||||
bucketURI := regAzblob.FindString(config.URI)
|
||||
objectName := regAzblob.ReplaceAllString(config.URI, "")
|
||||
|
||||
// Defaults to 5 seconds if interval is not set.
|
||||
var interval uint32 = 5
|
||||
if config.Interval != 0 {
|
||||
interval = config.Interval
|
||||
}
|
||||
|
||||
return &blobSync.Sync{
|
||||
Bucket: bucketURI,
|
||||
Object: objectName,
|
||||
|
||||
BlobURLMux: blob.DefaultURLMux(),
|
||||
|
||||
Logger: logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", "azblob"),
|
||||
),
|
||||
Interval: interval,
|
||||
Cron: cron.New(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sb *SyncBuilder) newS3(config sync.SourceConfig, logger *logger.Logger) *blobSync.Sync {
|
||||
// Extract bucket uri and object name from the full URI:
|
||||
// gs://bucket/path/to/object results in gs://bucket/ as bucketUri and
|
||||
// path/to/object as an object name.
|
||||
bucketURI := regS3.FindString(config.URI)
|
||||
objectName := regS3.ReplaceAllString(config.URI, "")
|
||||
|
||||
// Defaults to 5 seconds if interval is not set.
|
||||
var interval uint32 = 5
|
||||
if config.Interval != 0 {
|
||||
interval = config.Interval
|
||||
}
|
||||
|
||||
return &blobSync.Sync{
|
||||
Bucket: bucketURI,
|
||||
Object: objectName,
|
||||
|
||||
BlobURLMux: blob.DefaultURLMux(),
|
||||
|
||||
Logger: logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", "s3"),
|
||||
),
|
||||
Interval: interval,
|
||||
Cron: cron.New(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/sync/blob"
|
||||
buildermock "github.com/open-feature/flagd/core/pkg/sync/builder/mock"
|
||||
"github.com/open-feature/flagd/core/pkg/sync/file"
|
||||
"github.com/open-feature/flagd/core/pkg/sync/grpc"
|
||||
|
@ -174,31 +173,9 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "grpc-with-msg-size",
|
||||
args: args{
|
||||
logger: lg,
|
||||
sources: []sync.SourceConfig{
|
||||
{
|
||||
URI: "grpc://host:port",
|
||||
Provider: syncProviderGrpc,
|
||||
ProviderID: "myapp",
|
||||
CertPath: "/tmp/ca.cert",
|
||||
Selector: "source=database",
|
||||
MaxMsgSize: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSyncs: []sync.ISync{
|
||||
&grpc.Sync{},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "combined",
|
||||
injectFunc: func(builder *SyncBuilder) {
|
||||
t.Setenv("AZURE_STORAGE_ACCOUNT", "myaccount")
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
mockClientBuilder := buildermock.NewMockIK8sClientBuilder(ctrl)
|
||||
|
@ -234,18 +211,6 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
URI: "my-namespace/my-flags",
|
||||
Provider: syncProviderKubernetes,
|
||||
},
|
||||
{
|
||||
URI: "gs://bucket/path/to/file",
|
||||
Provider: syncProviderGcs,
|
||||
},
|
||||
{
|
||||
URI: "azblob://bucket/path/to/file",
|
||||
Provider: syncProviderAzblob,
|
||||
},
|
||||
{
|
||||
URI: "s3://bucket/path/to/file",
|
||||
Provider: syncProviderS3,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSyncs: []sync.ISync{
|
||||
|
@ -254,9 +219,6 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
&http.Sync{},
|
||||
&file.Sync{},
|
||||
&kubernetes.Sync{},
|
||||
&blob.Sync{},
|
||||
&blob.Sync{},
|
||||
&blob.Sync{},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
|
@ -282,198 +244,3 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GcsConfig(t *testing.T) {
|
||||
lg := logger.NewLogger(nil, false)
|
||||
defaultInterval := uint32(5)
|
||||
tests := []struct {
|
||||
name string
|
||||
uri string
|
||||
interval uint32
|
||||
expectedBucket string
|
||||
expectedObject string
|
||||
expectedInterval uint32
|
||||
}{
|
||||
{
|
||||
name: "simple path",
|
||||
uri: "gs://bucket/path/to/object",
|
||||
interval: 10,
|
||||
expectedBucket: "gs://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: 10,
|
||||
},
|
||||
{
|
||||
name: "default interval",
|
||||
uri: "gs://bucket/path/to/object",
|
||||
expectedBucket: "gs://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
{
|
||||
name: "no object set", // Blob syncer will return error when fetching
|
||||
uri: "gs://bucket/",
|
||||
expectedBucket: "gs://bucket/",
|
||||
expectedObject: "",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
{
|
||||
name: "malformed uri", // Blob syncer will return error when opening bucket
|
||||
uri: "malformed",
|
||||
expectedBucket: "",
|
||||
expectedObject: "malformed",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gcsSync := NewSyncBuilder().newGcs(sync.SourceConfig{
|
||||
URI: tt.uri,
|
||||
Interval: tt.interval,
|
||||
}, lg)
|
||||
require.Equal(t, tt.expectedBucket, gcsSync.Bucket)
|
||||
require.Equal(t, tt.expectedObject, gcsSync.Object)
|
||||
require.Equal(t, int(tt.expectedInterval), int(gcsSync.Interval))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_AzblobConfig(t *testing.T) {
|
||||
lg := logger.NewLogger(nil, false)
|
||||
defaultInterval := uint32(5)
|
||||
tests := []struct {
|
||||
name string
|
||||
uri string
|
||||
interval uint32
|
||||
storageAccount string
|
||||
expectedBucket string
|
||||
expectedObject string
|
||||
expectedInterval uint32
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "simple path",
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
interval: 10,
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "azblob://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: 10,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "default interval",
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "azblob://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: defaultInterval,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no object set", // Blob syncer will return error when fetching
|
||||
uri: "azblob://bucket/",
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "azblob://bucket/",
|
||||
expectedObject: "",
|
||||
expectedInterval: defaultInterval,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "malformed uri", // Blob syncer will return error when opening bucket
|
||||
uri: "malformed",
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "",
|
||||
expectedObject: "malformed",
|
||||
expectedInterval: defaultInterval,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "storage account not set", // Sync builder will fail and return error
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
storageAccount: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "storage account contains whitespace", // Sync builder will fail and return error
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
storageAccount: "my account",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv("AZURE_STORAGE_ACCOUNT", tt.storageAccount)
|
||||
azblobSync, err := NewSyncBuilder().newAzblob(sync.SourceConfig{
|
||||
URI: tt.uri,
|
||||
Interval: tt.interval,
|
||||
}, lg)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("newAzblob() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if (err != nil) && (tt.wantErr == true) {
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expectedBucket, azblobSync.Bucket)
|
||||
require.Equal(t, tt.expectedObject, azblobSync.Object)
|
||||
require.Equal(t, int(tt.expectedInterval), int(azblobSync.Interval))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_S3Config(t *testing.T) {
|
||||
lg := logger.NewLogger(nil, false)
|
||||
defaultInterval := uint32(5)
|
||||
tests := []struct {
|
||||
name string
|
||||
uri string
|
||||
interval uint32
|
||||
expectedBucket string
|
||||
expectedObject string
|
||||
expectedInterval uint32
|
||||
}{
|
||||
{
|
||||
name: "simple path",
|
||||
uri: "s3://bucket/path/to/object",
|
||||
interval: 10,
|
||||
expectedBucket: "s3://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: 10,
|
||||
},
|
||||
{
|
||||
name: "default interval",
|
||||
uri: "s3://bucket/path/to/object",
|
||||
expectedBucket: "s3://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
{
|
||||
name: "no object set", // Blob syncer will return error when fetching
|
||||
uri: "s3://bucket/",
|
||||
expectedBucket: "s3://bucket/",
|
||||
expectedObject: "",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
{
|
||||
name: "malformed uri", // Blob syncer will return error when opening bucket
|
||||
uri: "malformed",
|
||||
expectedBucket: "",
|
||||
expectedObject: "malformed",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s3Sync := NewSyncBuilder().newS3(sync.SourceConfig{
|
||||
URI: tt.uri,
|
||||
Interval: tt.interval,
|
||||
}, lg)
|
||||
require.Equal(t, tt.expectedBucket, s3Sync.Bucket)
|
||||
require.Equal(t, tt.expectedObject, s3Sync.Object)
|
||||
require.Equal(t, int(tt.expectedInterval), int(s3Sync.Interval))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -64,29 +64,9 @@ func ParseSyncProviderURIs(uris []string) ([]sync.SourceConfig, error) {
|
|||
Provider: syncProviderGrpc,
|
||||
TLS: true,
|
||||
})
|
||||
case regGRPCCustomResolver.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderGrpc,
|
||||
})
|
||||
case regGcs.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderGcs,
|
||||
})
|
||||
case regAzblob.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderAzblob,
|
||||
})
|
||||
case regS3.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderS3,
|
||||
})
|
||||
default:
|
||||
return syncProvidersParsed, fmt.Errorf("invalid sync uri argument: %s, must start with 'file:', "+
|
||||
"'http(s)://', 'grpc(s)://', 'gs://', 'azblob://' or 'core.openfeature.dev'", uri)
|
||||
"'http(s)://', 'grpc(s)://', or 'core.openfeature.dev'", uri)
|
||||
}
|
||||
}
|
||||
return syncProvidersParsed, nil
|
||||
|
|
|
@ -28,10 +28,7 @@ func TestParseSource(t *testing.T) {
|
|||
{"uri":"config/samples/example_flags.json","provider":"file"},
|
||||
{"uri":"http://test.com","provider":"http","bearerToken":":)"},
|
||||
{"uri":"host:port","provider":"grpc"},
|
||||
{"uri":"default/my-crd","provider":"kubernetes"},
|
||||
{"uri":"gs://bucket-name/path/to/file","provider":"gcs"},
|
||||
{"uri":"azblob://bucket-name/path/to/file","provider":"azblob"},
|
||||
{"uri":"s3://bucket-name/path/to/file","provider":"s3"}
|
||||
{"uri":"default/my-crd","provider":"kubernetes"}
|
||||
]`,
|
||||
expectErr: false,
|
||||
out: []sync.SourceConfig{
|
||||
|
@ -52,18 +49,6 @@ func TestParseSource(t *testing.T) {
|
|||
URI: "default/my-crd",
|
||||
Provider: syncProviderKubernetes,
|
||||
},
|
||||
{
|
||||
URI: "gs://bucket-name/path/to/file",
|
||||
Provider: syncProviderGcs,
|
||||
},
|
||||
{
|
||||
URI: "azblob://bucket-name/path/to/file",
|
||||
Provider: syncProviderAzblob,
|
||||
},
|
||||
{
|
||||
URI: "s3://bucket-name/path/to/file",
|
||||
Provider: syncProviderS3,
|
||||
},
|
||||
},
|
||||
},
|
||||
"multiple-syncs-with-options": {
|
||||
|
@ -197,9 +182,6 @@ func TestParseSyncProviderURIs(t *testing.T) {
|
|||
"grpc://host:port",
|
||||
"grpcs://secure-grpc",
|
||||
"core.openfeature.dev/default/my-crd",
|
||||
"gs://bucket-name/path/to/file",
|
||||
"azblob://bucket-name/path/to/file",
|
||||
"s3://bucket-name/path/to/file",
|
||||
},
|
||||
expectErr: false,
|
||||
out: []sync.SourceConfig{
|
||||
|
@ -225,18 +207,6 @@ func TestParseSyncProviderURIs(t *testing.T) {
|
|||
URI: "default/my-crd",
|
||||
Provider: "kubernetes",
|
||||
},
|
||||
{
|
||||
URI: "gs://bucket-name/path/to/file",
|
||||
Provider: syncProviderGcs,
|
||||
},
|
||||
{
|
||||
URI: "azblob://bucket-name/path/to/file",
|
||||
Provider: syncProviderAzblob,
|
||||
},
|
||||
{
|
||||
URI: "s3://bucket-name/path/to/file",
|
||||
Provider: syncProviderS3,
|
||||
},
|
||||
},
|
||||
},
|
||||
"empty": {
|
||||
|
|
|
@ -1,202 +0,0 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
)
|
||||
|
||||
// Implements file.Watcher using a timer and os.FileInfo
|
||||
type fileInfoWatcher struct {
|
||||
// Event Chan
|
||||
evChan chan fsnotify.Event
|
||||
// Errors Chan
|
||||
erChan chan error
|
||||
// logger
|
||||
logger *logger.Logger
|
||||
// Func to wrap os.Stat (injection point for test helpers)
|
||||
statFunc func(string) (fs.FileInfo, error)
|
||||
// thread-safe interface to underlying files we are watching
|
||||
mu sync.RWMutex
|
||||
watches map[string]fs.FileInfo // filename -> info
|
||||
}
|
||||
|
||||
// NewFsNotifyWatcher returns a new fsNotifyWatcher
|
||||
func NewFileInfoWatcher(ctx context.Context, logger *logger.Logger) Watcher {
|
||||
fiw := &fileInfoWatcher{
|
||||
evChan: make(chan fsnotify.Event, 32),
|
||||
erChan: make(chan error, 32),
|
||||
statFunc: getFileInfo,
|
||||
logger: logger,
|
||||
watches: make(map[string]fs.FileInfo),
|
||||
}
|
||||
fiw.run(ctx, (1 * time.Second))
|
||||
return fiw
|
||||
}
|
||||
|
||||
// fileInfoWatcher explicitly implements file.Watcher
|
||||
var _ Watcher = &fileInfoWatcher{}
|
||||
|
||||
// Close calls close on the underlying fsnotify.Watcher
|
||||
func (f *fileInfoWatcher) Close() error {
|
||||
// close all channels and exit
|
||||
close(f.evChan)
|
||||
close(f.erChan)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add calls Add on the underlying fsnotify.Watcher
|
||||
func (f *fileInfoWatcher) Add(name string) error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
// exit early if name already exists
|
||||
if _, ok := f.watches[name]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
info, err := f.statFunc(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.watches[name] = info
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove calls Remove on the underlying fsnotify.Watcher
|
||||
func (f *fileInfoWatcher) Remove(name string) error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
// no need to exit early, deleting non-existent key is a no-op
|
||||
delete(f.watches, name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watchlist calls watchlist on the underlying fsnotify.Watcher
|
||||
func (f *fileInfoWatcher) WatchList() []string {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
out := []string{}
|
||||
for name := range f.watches {
|
||||
n := name
|
||||
out = append(out, n)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Events returns the underlying watcher's Events chan
|
||||
func (f *fileInfoWatcher) Events() chan fsnotify.Event {
|
||||
return f.evChan
|
||||
}
|
||||
|
||||
// Errors returns the underlying watcher's Errors chan
|
||||
func (f *fileInfoWatcher) Errors() chan error {
|
||||
return f.erChan
|
||||
}
|
||||
|
||||
// run is a blocking function that starts the filewatcher's timer thread
|
||||
func (f *fileInfoWatcher) run(ctx context.Context, s time.Duration) {
|
||||
// timer thread
|
||||
go func() {
|
||||
// execute update on the configured interval of time
|
||||
ticker := time.NewTicker(s)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
if err := f.update(); err != nil {
|
||||
f.erChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (f *fileInfoWatcher) update() error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
for path, info := range f.watches {
|
||||
newInfo, err := f.statFunc(path)
|
||||
if err != nil {
|
||||
// if the file isn't there, it must have been removed
|
||||
// fire off a remove event and remove it from the watches
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
f.evChan <- fsnotify.Event{
|
||||
Name: path,
|
||||
Op: fsnotify.Remove,
|
||||
}
|
||||
delete(f.watches, path)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// if the new stat doesn't match the old stat, figure out what changed
|
||||
if info != newInfo {
|
||||
event := f.generateEvent(path, newInfo)
|
||||
if event != nil {
|
||||
f.evChan <- *event
|
||||
}
|
||||
f.watches[path] = newInfo
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateEvent figures out what changed and generates an fsnotify.Event for it. (if we care)
|
||||
// file removal are handled above in the update() method
|
||||
func (f *fileInfoWatcher) generateEvent(path string, newInfo fs.FileInfo) *fsnotify.Event {
|
||||
info := f.watches[path]
|
||||
switch {
|
||||
// new mod time is more recent than old mod time, generate a write event
|
||||
case newInfo.ModTime().After(info.ModTime()):
|
||||
return &fsnotify.Event{
|
||||
Name: path,
|
||||
Op: fsnotify.Write,
|
||||
}
|
||||
// the file modes changed, generate a chmod event
|
||||
case info.Mode() != newInfo.Mode():
|
||||
return &fsnotify.Event{
|
||||
Name: path,
|
||||
Op: fsnotify.Chmod,
|
||||
}
|
||||
// nothing changed that we care about
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// getFileInfo returns the fs.FileInfo for the given path
|
||||
func getFileInfo(path string) (fs.FileInfo, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error from os.Open(%s): %w", path, err)
|
||||
}
|
||||
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("error from fs.Stat(%s): %w", path, err)
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return info, fmt.Errorf("err from fs.Close(%s): %w", path, err)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
|
@ -1,248 +0,0 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
func Test_fileInfoWatcher_Close(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
watcher *fileInfoWatcher
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "all chans close",
|
||||
watcher: makeTestWatcher(t, map[string]fs.FileInfo{}),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := tt.watcher.Close(); (err != nil) != tt.wantErr {
|
||||
t.Errorf("fileInfoWatcher.Close() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
if _, ok := (<-tt.watcher.Errors()); ok != false {
|
||||
t.Error("fileInfoWatcher.Close() failed to close error chan")
|
||||
}
|
||||
if _, ok := (<-tt.watcher.Events()); ok != false {
|
||||
t.Error("fileInfoWatcher.Close() failed to close events chan")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fileInfoWatcher_Add(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
watcher *fileInfoWatcher
|
||||
add []string
|
||||
want map[string]fs.FileInfo
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "add one watch",
|
||||
watcher: makeTestWatcher(t, map[string]fs.FileInfo{}),
|
||||
add: []string{"/foo"},
|
||||
want: map[string]fs.FileInfo{
|
||||
"/foo": &mockFileInfo{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt.watcher.statFunc = makeStatFunc(t, &mockFileInfo{})
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
for _, path := range tt.add {
|
||||
if err := tt.watcher.Add(path); (err != nil) != tt.wantErr {
|
||||
t.Errorf("fileInfoWatcher.Add() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
}
|
||||
if !cmp.Equal(tt.watcher.watches, tt.want, cmp.AllowUnexported(mockFileInfo{})) {
|
||||
t.Errorf("fileInfoWatcher.Add(): want-, got+: %v ", cmp.Diff(tt.want, tt.watcher.watches))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fileInfoWatcher_Remove(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
watcher *fileInfoWatcher
|
||||
removeThis string
|
||||
want []string
|
||||
}{{
|
||||
name: "remove foo",
|
||||
watcher: makeTestWatcher(t, map[string]fs.FileInfo{"foo": &mockFileInfo{}, "bar": &mockFileInfo{}}),
|
||||
removeThis: "foo",
|
||||
want: []string{"bar"},
|
||||
}}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.watcher.Remove(tt.removeThis)
|
||||
if err != nil {
|
||||
t.Errorf("fileInfoWatcher.Remove() error = %v", err)
|
||||
}
|
||||
if !cmp.Equal(tt.watcher.WatchList(), tt.want) {
|
||||
t.Errorf("fileInfoWatcher.Add(): want-, got+: %v ", cmp.Diff(tt.want, tt.watcher.WatchList()))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fileInfoWatcher_update(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
watcher *fileInfoWatcher
|
||||
statFunc func(string) (fs.FileInfo, error)
|
||||
wantErr bool
|
||||
want *fsnotify.Event
|
||||
}{
|
||||
{
|
||||
name: "chmod",
|
||||
watcher: makeTestWatcher(t,
|
||||
map[string]fs.FileInfo{
|
||||
"foo": &mockFileInfo{
|
||||
name: "foo",
|
||||
mode: 0,
|
||||
},
|
||||
},
|
||||
),
|
||||
statFunc: func(_ string) (fs.FileInfo, error) {
|
||||
return &mockFileInfo{
|
||||
name: "foo",
|
||||
mode: 1,
|
||||
}, nil
|
||||
},
|
||||
want: &fsnotify.Event{Name: "foo", Op: fsnotify.Chmod},
|
||||
},
|
||||
{
|
||||
name: "write",
|
||||
watcher: makeTestWatcher(t,
|
||||
map[string]fs.FileInfo{
|
||||
"foo": &mockFileInfo{
|
||||
name: "foo",
|
||||
modTime: time.Now().Local(),
|
||||
},
|
||||
},
|
||||
),
|
||||
statFunc: func(_ string) (fs.FileInfo, error) {
|
||||
return &mockFileInfo{
|
||||
name: "foo",
|
||||
modTime: (time.Now().Local().Add(5 * time.Minute)),
|
||||
}, nil
|
||||
},
|
||||
want: &fsnotify.Event{Name: "foo", Op: fsnotify.Write},
|
||||
},
|
||||
{
|
||||
name: "remove",
|
||||
watcher: makeTestWatcher(t,
|
||||
map[string]fs.FileInfo{
|
||||
"foo": &mockFileInfo{
|
||||
name: "foo",
|
||||
},
|
||||
},
|
||||
),
|
||||
statFunc: func(_ string) (fs.FileInfo, error) {
|
||||
return nil, fmt.Errorf("mock file-no-existy error: %w", os.ErrNotExist)
|
||||
},
|
||||
want: &fsnotify.Event{Name: "foo", Op: fsnotify.Remove},
|
||||
},
|
||||
{
|
||||
name: "unknown error",
|
||||
watcher: makeTestWatcher(t,
|
||||
map[string]fs.FileInfo{
|
||||
"foo": &mockFileInfo{
|
||||
name: "foo",
|
||||
},
|
||||
},
|
||||
),
|
||||
statFunc: func(_ string) (fs.FileInfo, error) {
|
||||
return nil, errors.New("unhandled error")
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// set the statFunc
|
||||
tt.watcher.statFunc = tt.statFunc
|
||||
// run an update
|
||||
// this also flexes fileinfowatcher.generateEvent()
|
||||
err := tt.watcher.update()
|
||||
if err != nil {
|
||||
if tt.wantErr {
|
||||
return
|
||||
}
|
||||
t.Errorf("fileInfoWatcher.update() unexpected error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
// slurp an event off the event chan
|
||||
out := <-tt.watcher.Events()
|
||||
if out != *tt.want {
|
||||
t.Errorf("fileInfoWatcher.update() wanted %v, got %v", tt.want, out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Helpers
|
||||
|
||||
// makeTestWatcher returns a pointer to a fileInfoWatcher suitable for testing
|
||||
func makeTestWatcher(t *testing.T, watches map[string]fs.FileInfo) *fileInfoWatcher {
|
||||
t.Helper()
|
||||
|
||||
return &fileInfoWatcher{
|
||||
evChan: make(chan fsnotify.Event, 512),
|
||||
erChan: make(chan error, 512),
|
||||
watches: watches,
|
||||
}
|
||||
}
|
||||
|
||||
// makeStateFunc returns an os.Stat wrapper that parrots back whatever its
|
||||
// constructor is given
|
||||
func makeStatFunc(t *testing.T, fi fs.FileInfo) func(string) (fs.FileInfo, error) {
|
||||
t.Helper()
|
||||
return func(_ string) (fs.FileInfo, error) {
|
||||
return fi, nil
|
||||
}
|
||||
}
|
||||
|
||||
// mockFileInfo implements fs.FileInfo for mocks
|
||||
type mockFileInfo struct {
|
||||
name string // base name of the file
|
||||
size int64 // length in bytes for regular files; system-dependent for others
|
||||
mode fs.FileMode // file mode bits
|
||||
modTime time.Time // modification time
|
||||
}
|
||||
|
||||
// explicitly impements fs.FileInfo
|
||||
var _ fs.FileInfo = &mockFileInfo{}
|
||||
|
||||
func (mfi *mockFileInfo) Name() string {
|
||||
return mfi.name
|
||||
}
|
||||
|
||||
func (mfi *mockFileInfo) Size() int64 {
|
||||
return mfi.size
|
||||
}
|
||||
|
||||
func (mfi *mockFileInfo) Mode() fs.FileMode {
|
||||
return mfi.mode
|
||||
}
|
||||
|
||||
func (mfi *mockFileInfo) ModTime() time.Time {
|
||||
return mfi.modTime
|
||||
}
|
||||
|
||||
func (mfi *mockFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (mfi *mockFileInfo) Sys() any {
|
||||
return "foo"
|
||||
}
|
|
@ -2,49 +2,34 @@ package file
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
msync "sync"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/utils"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
FSNOTIFY = "fsnotify"
|
||||
FILEINFO = "fileinfo"
|
||||
)
|
||||
|
||||
type Watcher interface {
|
||||
Close() error
|
||||
Add(name string) error
|
||||
Remove(name string) error
|
||||
WatchList() []string
|
||||
Events() chan fsnotify.Event
|
||||
Errors() chan error
|
||||
}
|
||||
|
||||
type Sync struct {
|
||||
URI string
|
||||
Logger *logger.Logger
|
||||
// watchType indicates how to watch the file FSNOTIFY|FILEINFO
|
||||
watchType string
|
||||
watcher Watcher
|
||||
ready bool
|
||||
Mux *msync.RWMutex
|
||||
// FileType indicates the file type e.g., json, yaml/yml etc.,
|
||||
fileType string
|
||||
watcher *fsnotify.Watcher
|
||||
ready bool
|
||||
Mux *msync.RWMutex
|
||||
}
|
||||
|
||||
func NewFileSync(uri string, watchType string, logger *logger.Logger) *Sync {
|
||||
func NewFileSync(uri string, logger *logger.Logger) *Sync {
|
||||
return &Sync{
|
||||
URI: uri,
|
||||
watchType: watchType,
|
||||
Logger: logger,
|
||||
Mux: &msync.RWMutex{},
|
||||
URI: uri,
|
||||
Logger: logger,
|
||||
Mux: &msync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -52,28 +37,18 @@ func NewFileSync(uri string, watchType string, logger *logger.Logger) *Sync {
|
|||
const defaultState = "{}"
|
||||
|
||||
func (fs *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *Sync) Init(ctx context.Context) error {
|
||||
func (fs *Sync) Init(_ context.Context) error {
|
||||
fs.Logger.Info("Starting filepath sync notifier")
|
||||
|
||||
switch fs.watchType {
|
||||
case FSNOTIFY, "":
|
||||
w, err := NewFSNotifyWatcher()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating fsnotify watcher: %w", err)
|
||||
}
|
||||
fs.watcher = w
|
||||
case FILEINFO:
|
||||
w := NewFileInfoWatcher(ctx, fs.Logger)
|
||||
fs.watcher = w
|
||||
default:
|
||||
return fmt.Errorf("unknown watcher type: '%s'", fs.watchType)
|
||||
w, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating filepath watcher: %w", err)
|
||||
}
|
||||
|
||||
if err := fs.watcher.Add(fs.URI); err != nil {
|
||||
fs.watcher = w
|
||||
if err = fs.watcher.Add(fs.URI); err != nil {
|
||||
return fmt.Errorf("error adding watcher %s: %w", fs.URI, err)
|
||||
}
|
||||
return nil
|
||||
|
@ -94,12 +69,12 @@ func (fs *Sync) setReady(val bool) {
|
|||
//nolint:funlen
|
||||
func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
defer fs.watcher.Close()
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
fs.setReady(true)
|
||||
fs.Logger.Info(fmt.Sprintf("watching filepath: %s", fs.URI))
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-fs.watcher.Events():
|
||||
case event, ok := <-fs.watcher.Events:
|
||||
if !ok {
|
||||
fs.Logger.Info("filepath notifier closed")
|
||||
return errors.New("filepath notifier closed")
|
||||
|
@ -108,7 +83,7 @@ func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
fs.Logger.Info(fmt.Sprintf("filepath event: %s %s", event.Name, event.Op.String()))
|
||||
switch {
|
||||
case event.Has(fsnotify.Create) || event.Has(fsnotify.Write):
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
case event.Has(fsnotify.Remove):
|
||||
// K8s exposes config maps as symlinks.
|
||||
// Updates cause a remove event, we need to re-add the watcher in this case.
|
||||
|
@ -116,24 +91,24 @@ func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
if err != nil {
|
||||
// the watcher could not be re-added, so the file must have been deleted
|
||||
fs.Logger.Error(fmt.Sprintf("error restoring watcher, file may have been deleted: %s", err.Error()))
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.DELETE, dataSync)
|
||||
continue
|
||||
}
|
||||
|
||||
// Counterintuitively, remove events are the only meaningful ones seen in K8s.
|
||||
// K8s handles mounted ConfigMap updates by modifying symbolic links, which is an atomic operation.
|
||||
// At the point the remove event is fired, we have our new data, so we can send it down the channel.
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
case event.Has(fsnotify.Chmod):
|
||||
// on linux the REMOVE event will not fire until all file descriptors are closed, this cannot happen
|
||||
// while the file is being watched, os.Stat is used here to infer deletion
|
||||
if _, err := os.Stat(fs.URI); errors.Is(err, os.ErrNotExist) {
|
||||
fs.Logger.Error(fmt.Sprintf("file has been deleted: %s", err.Error()))
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.DELETE, dataSync)
|
||||
}
|
||||
}
|
||||
|
||||
case err, ok := <-fs.watcher.Errors():
|
||||
case err, ok := <-fs.watcher.Errors:
|
||||
if !ok {
|
||||
fs.setReady(false)
|
||||
return errors.New("watcher error")
|
||||
|
@ -147,8 +122,14 @@ func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (fs *Sync) sendDataSync(ctx context.Context, dataSync chan<- sync.DataSync) {
|
||||
fs.Logger.Debug(fmt.Sprintf("Data sync received for %s", fs.URI))
|
||||
func (fs *Sync) sendDataSync(ctx context.Context, syncType sync.Type, dataSync chan<- sync.DataSync) {
|
||||
fs.Logger.Debug(fmt.Sprintf("Configuration %s: %s", fs.URI, syncType.String()))
|
||||
|
||||
if syncType == sync.DELETE {
|
||||
// Skip fetching and emit default state to avoid EOF errors
|
||||
dataSync <- sync.DataSync{FlagData: defaultState, Source: fs.URI, Type: syncType}
|
||||
return
|
||||
}
|
||||
|
||||
msg := defaultState
|
||||
m, err := fs.fetch(ctx)
|
||||
|
@ -161,29 +142,49 @@ func (fs *Sync) sendDataSync(ctx context.Context, dataSync chan<- sync.DataSync)
|
|||
msg = m
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: fs.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: fs.URI, Type: syncType}
|
||||
}
|
||||
|
||||
func (fs *Sync) fetch(_ context.Context) (string, error) {
|
||||
if fs.URI == "" {
|
||||
return "", errors.New("no filepath string set")
|
||||
}
|
||||
|
||||
file, err := os.Open(fs.URI)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening file %s: %w", fs.URI, err)
|
||||
if fs.fileType == "" {
|
||||
uriSplit := strings.Split(fs.URI, ".")
|
||||
fs.fileType = uriSplit[len(uriSplit)-1]
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
data, err := io.ReadAll(file)
|
||||
rawFile, err := os.ReadFile(fs.URI)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading file %s: %w", fs.URI, err)
|
||||
}
|
||||
|
||||
// File extension is used to determine the content type, so media type is unnecessary
|
||||
json, err := utils.ConvertToJSON(data, filepath.Ext(fs.URI), "")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error converting file content to json: %w", err)
|
||||
switch fs.fileType {
|
||||
case "yaml", "yml":
|
||||
return yamlToJSON(rawFile)
|
||||
case "json":
|
||||
return string(rawFile), nil
|
||||
default:
|
||||
return "", fmt.Errorf("filepath extension for URI: '%s' is not supported", fs.URI)
|
||||
}
|
||||
return json, nil
|
||||
}
|
||||
|
||||
// yamlToJSON is a generic helper function to convert
|
||||
// yaml to json
|
||||
func yamlToJSON(rawFile []byte) (string, error) {
|
||||
if len(rawFile) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var ms map[string]interface{}
|
||||
// yaml.Unmarshal unmarshals to map[interface]interface{}
|
||||
if err := yaml.Unmarshal(rawFile, &ms); err != nil {
|
||||
return "", fmt.Errorf("unmarshal yaml: %w", err)
|
||||
}
|
||||
|
||||
r, err := json.Marshal(ms)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("convert yaml to json: %w", err)
|
||||
}
|
||||
|
||||
return string(r), err
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@ func TestSimpleReSync(t *testing.T) {
|
|||
expectedDataSync := sync.DataSync{
|
||||
FlagData: "hello",
|
||||
Source: source,
|
||||
Type: sync.ALL,
|
||||
}
|
||||
handler := Sync{
|
||||
URI: source,
|
||||
|
@ -75,6 +76,7 @@ func TestSimpleSync(t *testing.T) {
|
|||
{
|
||||
FlagData: fetchFileContents,
|
||||
Source: fmt.Sprintf("%s/%s", readDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -92,10 +94,12 @@ func TestSimpleSync(t *testing.T) {
|
|||
{
|
||||
FlagData: fetchFileContents,
|
||||
Source: fmt.Sprintf("%s/%s", updateDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
{
|
||||
FlagData: "new content",
|
||||
Source: fmt.Sprintf("%s/%s", updateDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -113,10 +117,12 @@ func TestSimpleSync(t *testing.T) {
|
|||
{
|
||||
FlagData: fetchFileContents,
|
||||
Source: fmt.Sprintf("%s/%s", deleteDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
{
|
||||
FlagData: defaultState,
|
||||
Source: fmt.Sprintf("%s/%s", deleteDirName, fetchFileName),
|
||||
Type: sync.DELETE,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -166,6 +172,9 @@ func TestSimpleSync(t *testing.T) {
|
|||
if data.Source != syncEvent.Source {
|
||||
t.Errorf("expected source: %s, but received source: %s", syncEvent.Source, data.Source)
|
||||
}
|
||||
if data.Type != syncEvent.Type {
|
||||
t.Errorf("expected type: %b, but received type: %b", syncEvent.Type, data.Type)
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Errorf("event not found, timeout out after 10 seconds")
|
||||
}
|
||||
|
@ -181,7 +190,7 @@ func TestSimpleSync(t *testing.T) {
|
|||
|
||||
func TestFilePathSync_Fetch(t *testing.T) {
|
||||
successDirName := t.TempDir()
|
||||
failureDirName := t.TempDir()
|
||||
falureDirName := t.TempDir()
|
||||
tests := map[string]struct {
|
||||
fpSync Sync
|
||||
handleResponse func(t *testing.T, fetched string, err error)
|
||||
|
@ -204,9 +213,9 @@ func TestFilePathSync_Fetch(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"not found": {
|
||||
fetchDirName: failureDirName,
|
||||
fetchDirName: falureDirName,
|
||||
fpSync: Sync{
|
||||
URI: fmt.Sprintf("%s/%s", failureDirName, "not_found"),
|
||||
URI: fmt.Sprintf("%s/%s", falureDirName, "not_found"),
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
},
|
||||
handleResponse: func(t *testing.T, fetched string, err error) {
|
||||
|
@ -300,3 +309,31 @@ func writeToFile(t *testing.T, fetchDirName, fileContents string) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilePathSync_yamlToJSON(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input []byte
|
||||
handleResponse func(t *testing.T, output string, err error)
|
||||
}{
|
||||
"empty": {
|
||||
input: []byte(""),
|
||||
handleResponse: func(t *testing.T, output string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("expect no err, got err = %v", err)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
t.Fatalf("expect output = '', got output = '%v'", output)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
output, err := yamlToJSON(tt.input)
|
||||
|
||||
tt.handleResponse(t, output, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,67 +0,0 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
// Implements file.Watcher by wrapping fsnotify.Watcher
|
||||
// This is only necessary because fsnotify.Watcher directly exposes its Errors
|
||||
// and Events channels rather than returning them by method invocation
|
||||
type fsNotifyWatcher struct {
|
||||
watcher *fsnotify.Watcher
|
||||
}
|
||||
|
||||
// NewFsNotifyWatcher returns a new fsNotifyWatcher
|
||||
func NewFSNotifyWatcher() (Watcher, error) {
|
||||
fsn, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fsnotify: %w", err)
|
||||
}
|
||||
return &fsNotifyWatcher{
|
||||
watcher: fsn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// explicitly implements file.Watcher
|
||||
var _ Watcher = &fsNotifyWatcher{}
|
||||
|
||||
// Close calls close on the underlying fsnotify.Watcher
|
||||
func (f *fsNotifyWatcher) Close() error {
|
||||
if err := f.watcher.Close(); err != nil {
|
||||
return fmt.Errorf("fsnotify: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add calls Add on the underlying fsnotify.Watcher
|
||||
func (f *fsNotifyWatcher) Add(name string) error {
|
||||
if err := f.watcher.Add(name); err != nil {
|
||||
return fmt.Errorf("fsnotify: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove calls Remove on the underlying fsnotify.Watcher
|
||||
func (f *fsNotifyWatcher) Remove(name string) error {
|
||||
if err := f.watcher.Remove(name); err != nil {
|
||||
return fmt.Errorf("fsnotify: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watchlist calls watchlist on the underlying fsnotify.Watcher
|
||||
func (f *fsNotifyWatcher) WatchList() []string {
|
||||
return f.watcher.WatchList()
|
||||
}
|
||||
|
||||
// Events returns the underlying watcher's Events chan
|
||||
func (f *fsNotifyWatcher) Events() chan fsnotify.Event {
|
||||
return f.watcher.Events
|
||||
}
|
||||
|
||||
// Errors returns the underlying watcher's Errors chan
|
||||
func (f *fsNotifyWatcher) Errors() chan error {
|
||||
return f.watcher.Errors
|
||||
}
|
|
@ -12,17 +12,14 @@ import (
|
|||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
grpccredential "github.com/open-feature/flagd/core/pkg/sync/grpc/credentials"
|
||||
_ "github.com/open-feature/flagd/core/pkg/sync/grpc/nameresolvers" // initialize custom resolvers e.g. envoy.Init()
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
// Prefix for GRPC URL inputs. GRPC does not define a standard prefix. This prefix helps to differentiate remote
|
||||
// URLs for REST APIs (i.e - HTTP) from GRPC endpoints.
|
||||
Prefix = "grpc://"
|
||||
PrefixSecure = "grpcs://"
|
||||
SupportedScheme = "(envoy|dns|uds|xds)"
|
||||
Prefix = "grpc://"
|
||||
PrefixSecure = "grpcs://"
|
||||
|
||||
// Connection retry constants
|
||||
// Back off period is calculated with backOffBase ^ #retry-iteration. However, when #retry-iteration count reach
|
||||
|
@ -44,46 +41,28 @@ type FlagSyncServiceClientResponse interface {
|
|||
var once msync.Once
|
||||
|
||||
type Sync struct {
|
||||
GrpcDialOptionsOverride []grpc.DialOption
|
||||
CertPath string
|
||||
CredentialBuilder grpccredential.Builder
|
||||
Logger *logger.Logger
|
||||
ProviderID string
|
||||
Secure bool
|
||||
Selector string
|
||||
URI string
|
||||
MaxMsgSize int
|
||||
CertPath string
|
||||
CredentialBuilder grpccredential.Builder
|
||||
Logger *logger.Logger
|
||||
ProviderID string
|
||||
Secure bool
|
||||
Selector string
|
||||
URI string
|
||||
|
||||
client FlagSyncServiceClient
|
||||
ready bool
|
||||
}
|
||||
|
||||
func (g *Sync) Init(_ context.Context) error {
|
||||
var rpcCon *grpc.ClientConn // Reusable client connection
|
||||
var err error
|
||||
|
||||
if len(g.GrpcDialOptionsOverride) > 0 {
|
||||
g.Logger.Debug("GRPC DialOptions override provided")
|
||||
rpcCon, err = grpc.NewClient(g.URI, g.GrpcDialOptionsOverride...)
|
||||
} else {
|
||||
var tCredentials credentials.TransportCredentials
|
||||
tCredentials, err = g.CredentialBuilder.Build(g.Secure, g.CertPath)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error building transport credentials: %w", err)
|
||||
g.Logger.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// Set MaxMsgSize if passed
|
||||
if g.MaxMsgSize > 0 {
|
||||
g.Logger.Info(fmt.Sprintf("setting max receive message size %d bytes default 4MB", g.MaxMsgSize))
|
||||
dialOptions := grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(g.MaxMsgSize))
|
||||
rpcCon, err = grpc.NewClient(g.URI, grpc.WithTransportCredentials(tCredentials), dialOptions)
|
||||
} else {
|
||||
rpcCon, err = grpc.NewClient(g.URI, grpc.WithTransportCredentials(tCredentials))
|
||||
}
|
||||
tCredentials, err := g.CredentialBuilder.Build(g.Secure, g.CertPath)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("error building transport credentials: %w", err)
|
||||
g.Logger.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// Derive reusable client connection
|
||||
rpcCon, err := grpc.NewClient(g.URI, grpc.WithTransportCredentials(tCredentials))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("error initiating grpc client connection: %w", err)
|
||||
g.Logger.Error(err.Error())
|
||||
|
@ -106,6 +85,7 @@ func (g *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error
|
|||
dataSync <- sync.DataSync{
|
||||
FlagData: res.GetFlagConfiguration(),
|
||||
Source: g.URI,
|
||||
Type: sync.ALL,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -199,10 +179,10 @@ func (g *Sync) handleFlagSync(stream syncv1grpc.FlagSyncService_SyncFlagsClient,
|
|||
}
|
||||
|
||||
dataSync <- sync.DataSync{
|
||||
FlagData: data.FlagConfiguration,
|
||||
SyncContext: data.SyncContext,
|
||||
Source: g.URI,
|
||||
Selector: g.Selector,
|
||||
FlagData: data.FlagConfiguration,
|
||||
Source: g.URI,
|
||||
Selector: g.Selector,
|
||||
Type: sync.ALL,
|
||||
}
|
||||
|
||||
g.Logger.Debug("received full configuration payload")
|
||||
|
|
|
@ -16,17 +16,14 @@ import (
|
|||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
credendialsmock "github.com/open-feature/flagd/core/pkg/sync/grpc/credentials/mock"
|
||||
grpcmock "github.com/open-feature/flagd/core/pkg/sync/grpc/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zaptest/observer"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/test/bufconn"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
)
|
||||
|
||||
func Test_InitWithMockCredentialBuilder(t *testing.T) {
|
||||
|
@ -81,30 +78,6 @@ func Test_InitWithMockCredentialBuilder(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func Test_InitWithSizeOverride(t *testing.T) {
|
||||
observedZapCore, observedLogs := observer.New(zap.InfoLevel)
|
||||
observedLogger := zap.New(observedZapCore)
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
mockCredentialBulder := credendialsmock.NewMockBuilder(mockCtrl)
|
||||
|
||||
mockCredentialBulder.EXPECT().
|
||||
Build(gomock.Any(), gomock.Any()).
|
||||
Return(insecure.NewCredentials(), nil)
|
||||
|
||||
grpcSync := Sync{
|
||||
URI: "grpc-target",
|
||||
Logger: logger.NewLogger(observedLogger, false),
|
||||
CredentialBuilder: mockCredentialBulder,
|
||||
MaxMsgSize: 10,
|
||||
}
|
||||
|
||||
err := grpcSync.Init(context.Background())
|
||||
|
||||
require.Nilf(t, err, "%s: expected no error, but got non nil error", t.Name())
|
||||
require.Equal(t, "setting max receive message size 10 bytes default 4MB", observedLogs.All()[0].Message)
|
||||
}
|
||||
|
||||
func Test_ReSyncTests(t *testing.T) {
|
||||
const target = "localBufCon"
|
||||
|
||||
|
@ -123,6 +96,7 @@ func Test_ReSyncTests(t *testing.T) {
|
|||
notifications: []sync.DataSync{
|
||||
{
|
||||
FlagData: "success",
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
shouldError: false,
|
||||
|
@ -179,6 +153,9 @@ func Test_ReSyncTests(t *testing.T) {
|
|||
|
||||
for _, expected := range test.notifications {
|
||||
out := <-syncChan
|
||||
if expected.Type != out.Type {
|
||||
t.Errorf("Returned sync type = %v, wanted %v", out.Type, expected.Type)
|
||||
}
|
||||
|
||||
if expected.FlagData != out.FlagData {
|
||||
t.Errorf("Returned sync data = %v, wanted %v", out.FlagData, expected.FlagData)
|
||||
|
@ -192,14 +169,98 @@ func Test_ReSyncTests(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSync_BasicFlagSyncStates(t *testing.T) {
|
||||
grpcSyncImpl := Sync{
|
||||
URI: "grpc://test",
|
||||
ProviderID: "",
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
|
||||
mockError := errors.New("could not sync")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
stream syncv1grpc.FlagSyncService_SyncFlagsClient
|
||||
setup func(t *testing.T, client *grpcmock.MockFlagSyncServiceClient, clientResponse *grpcmock.MockFlagSyncServiceClientResponse)
|
||||
want sync.Type
|
||||
wantError error
|
||||
ready bool
|
||||
}{
|
||||
{
|
||||
name: "State All maps to Sync All",
|
||||
setup: func(t *testing.T, client *grpcmock.MockFlagSyncServiceClient, clientResponse *grpcmock.MockFlagSyncServiceClientResponse) {
|
||||
client.EXPECT().SyncFlags(gomock.Any(), gomock.Any(), gomock.Any()).Return(clientResponse, nil)
|
||||
gomock.InOrder(
|
||||
clientResponse.EXPECT().Recv().Return(
|
||||
&v1.SyncFlagsResponse{
|
||||
FlagConfiguration: "{}",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
clientResponse.EXPECT().Recv().Return(
|
||||
nil, io.EOF,
|
||||
),
|
||||
)
|
||||
},
|
||||
want: sync.ALL,
|
||||
ready: true,
|
||||
},
|
||||
{
|
||||
name: "Error during flag sync",
|
||||
setup: func(t *testing.T, client *grpcmock.MockFlagSyncServiceClient, clientResponse *grpcmock.MockFlagSyncServiceClientResponse) {
|
||||
client.EXPECT().SyncFlags(gomock.Any(), gomock.Any(), gomock.Any()).Return(clientResponse, nil)
|
||||
clientResponse.EXPECT().Recv().Return(
|
||||
nil,
|
||||
mockError,
|
||||
)
|
||||
},
|
||||
ready: true,
|
||||
want: -1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
syncChan := make(chan sync.DataSync, 1)
|
||||
|
||||
mockClient := grpcmock.NewMockFlagSyncServiceClient(ctrl)
|
||||
mockClientResponse := grpcmock.NewMockFlagSyncServiceClientResponse(ctrl)
|
||||
test.setup(t, mockClient, mockClientResponse)
|
||||
|
||||
waitChan := make(chan struct{})
|
||||
go func() {
|
||||
grpcSyncImpl.client = mockClient
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
err := grpcSyncImpl.Sync(ctx, syncChan)
|
||||
if err != nil {
|
||||
t.Errorf("Error handling flag sync: %v", err)
|
||||
}
|
||||
close(waitChan)
|
||||
}()
|
||||
<-waitChan
|
||||
|
||||
if test.want < 0 {
|
||||
require.Empty(t, syncChan)
|
||||
return
|
||||
}
|
||||
data := <-syncChan
|
||||
|
||||
if grpcSyncImpl.IsReady() != test.ready {
|
||||
t.Errorf("expected grpcSyncImpl.ready to be: '%v', got: '%v'", test.ready, grpcSyncImpl.ready)
|
||||
}
|
||||
|
||||
if data.Type != test.want {
|
||||
t.Errorf("Returned data sync state = %v, wanted %v", data.Type, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_StreamListener(t *testing.T) {
|
||||
const target = "localBufCon"
|
||||
|
||||
metadata, err := structpb.NewStruct(map[string]any{"sources": "A,B,C"})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create sync context: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input []serverPayload
|
||||
|
@ -214,8 +275,8 @@ func Test_StreamListener(t *testing.T) {
|
|||
},
|
||||
output: []sync.DataSync{
|
||||
{
|
||||
FlagData: "{\"flags\": {}}",
|
||||
SyncContext: metadata,
|
||||
FlagData: "{\"flags\": {}}",
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -231,12 +292,12 @@ func Test_StreamListener(t *testing.T) {
|
|||
},
|
||||
output: []sync.DataSync{
|
||||
{
|
||||
FlagData: "{}",
|
||||
SyncContext: metadata,
|
||||
FlagData: "{}",
|
||||
Type: sync.ALL,
|
||||
},
|
||||
{
|
||||
FlagData: "{\"flags\": {}}",
|
||||
SyncContext: metadata,
|
||||
FlagData: "{\"flags\": {}}",
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -289,12 +350,12 @@ func Test_StreamListener(t *testing.T) {
|
|||
for _, expected := range test.output {
|
||||
out := <-syncChan
|
||||
|
||||
if expected.FlagData != out.FlagData {
|
||||
t.Errorf("Returned sync data = %v, wanted %v", out.FlagData, expected.FlagData)
|
||||
if expected.Type != out.Type {
|
||||
t.Errorf("Returned sync type = %v, wanted %v", out.Type, expected.Type)
|
||||
}
|
||||
|
||||
if !proto.Equal(expected.SyncContext, out.SyncContext) {
|
||||
t.Errorf("Returned sync context = %v, wanted = %v", out.SyncContext, expected.SyncContext)
|
||||
if expected.FlagData != out.FlagData {
|
||||
t.Errorf("Returned sync data = %v, wanted %v", out.FlagData, expected.FlagData)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -379,7 +440,8 @@ func Test_SyncRetry(t *testing.T) {
|
|||
// Setup
|
||||
target := "grpc://local"
|
||||
bufListener := bufconn.Listen(1)
|
||||
emptyFlagData := "{}"
|
||||
|
||||
expectType := sync.ALL
|
||||
|
||||
// buffer based server. response ignored purposefully
|
||||
bServer := bufferedServer{listener: bufListener, mockResponses: []serverPayload{
|
||||
|
@ -433,7 +495,7 @@ func Test_SyncRetry(t *testing.T) {
|
|||
t.Errorf("timeout waiting for conditions to fulfil")
|
||||
break
|
||||
case data := <-syncChan:
|
||||
if data.FlagData != emptyFlagData {
|
||||
if data.Type != expectType {
|
||||
t.Errorf("sync start error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
@ -453,9 +515,9 @@ func Test_SyncRetry(t *testing.T) {
|
|||
case <-tCtx.Done():
|
||||
cancelFunc()
|
||||
t.Error("timeout waiting for conditions to fulfil")
|
||||
case data := <-syncChan:
|
||||
if data.FlagData != emptyFlagData {
|
||||
t.Errorf("sync start error: %s", err.Error())
|
||||
case rsp := <-syncChan:
|
||||
if rsp.Type != expectType {
|
||||
t.Errorf("expected response: %s, but got: %s", expectType, rsp.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -487,10 +549,8 @@ type bufferedServer struct {
|
|||
|
||||
func (b *bufferedServer) SyncFlags(_ *v1.SyncFlagsRequest, stream syncv1grpc.FlagSyncService_SyncFlagsServer) error {
|
||||
for _, response := range b.mockResponses {
|
||||
metadata, _ := structpb.NewStruct(map[string]any{"sources": "A,B,C"})
|
||||
err := stream.Send(&v1.SyncFlagsResponse{
|
||||
FlagConfiguration: response.flags,
|
||||
SyncContext: metadata,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Error with stream: %s", err.Error())
|
||||
|
|
|
@ -1,84 +0,0 @@
|
|||
package nameresolvers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
const scheme = "envoy"
|
||||
|
||||
type envoyBuilder struct{}
|
||||
|
||||
// Build A custom NameResolver to resolve gRPC target uri for envoy in the
|
||||
// format of.
|
||||
//
|
||||
// Custom URI Scheme:
|
||||
//
|
||||
// envoy://[proxy-agent-host]:[proxy-agent-port]/[service-name]
|
||||
func (*envoyBuilder) Build(target resolver.Target,
|
||||
cc resolver.ClientConn, _ resolver.BuildOptions,
|
||||
) (resolver.Resolver, error) {
|
||||
_, err := isValidTarget(target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := &envoyResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
}
|
||||
r.start()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (*envoyBuilder) Scheme() string {
|
||||
return scheme
|
||||
}
|
||||
|
||||
type envoyResolver struct {
|
||||
target resolver.Target
|
||||
cc resolver.ClientConn
|
||||
}
|
||||
|
||||
// Envoy NameResolver, will always override the authority with the specified authority i.e. URL.path and
|
||||
// use the socketAddress i.e. Host:Port to connect.
|
||||
func (r *envoyResolver) start() {
|
||||
addr := fmt.Sprintf("%s:%s", r.target.URL.Hostname(), r.target.URL.Port())
|
||||
err := r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (*envoyResolver) ResolveNow(resolver.ResolveNowOptions) {}
|
||||
|
||||
func (*envoyResolver) Close() {}
|
||||
|
||||
// Validate user specified target
|
||||
//
|
||||
// Sample target string: envoy://localhost:9211/test.service
|
||||
//
|
||||
// return `true` if the target string used match the scheme and format
|
||||
func isValidTarget(target resolver.Target) (bool, error) {
|
||||
// make sure and host and port not empty
|
||||
// used as resolver.Address
|
||||
if target.URL.Scheme != "envoy" || target.URL.Hostname() == "" || target.URL.Port() == "" {
|
||||
return false, fmt.Errorf("envoy-resolver: invalid scheme or missing host/port, target: %s",
|
||||
target)
|
||||
}
|
||||
|
||||
// make sure the path is valid
|
||||
// used as :authority e.g. test.service
|
||||
path := target.Endpoint()
|
||||
if path == "" || strings.Contains(path, "/") {
|
||||
return false, fmt.Errorf("envoy-resolver: invalid path %s", path)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
resolver.Register(&envoyBuilder{})
|
||||
}
|
|
@ -1,103 +0,0 @@
|
|||
package nameresolvers
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
func Test_EnvoyTargetString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mockURL url.URL
|
||||
mockError string
|
||||
shouldError bool
|
||||
}{
|
||||
{
|
||||
name: "Should be valid string",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "",
|
||||
shouldError: false,
|
||||
},
|
||||
{
|
||||
name: "Should be valid scheme",
|
||||
mockURL: url.URL{
|
||||
Scheme: "invalid",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: invalid://localhost:8080/test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Should be valid path",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service/test",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid path test.service/test",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Should be valid path",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service/",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid path test.service/",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Hostname should not be empty",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: ":8080",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: envoy://:8080/test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Port should not be empty",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: envoy://localhost/test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Hostname and Port should not be empty",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: envoy:///test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
target := resolver.Target{URL: test.mockURL}
|
||||
|
||||
isValid, err := isValidTarget(target)
|
||||
|
||||
if test.shouldError {
|
||||
require.False(t, isValid, "Should not be valid")
|
||||
require.NotNilf(t, err, "Error should not be nil")
|
||||
require.Containsf(t, err.Error(), test.mockError, "Error should contains %s", test.mockError)
|
||||
} else {
|
||||
require.True(t, isValid, "Should be valid")
|
||||
require.NoErrorf(t, err, "Error should be nil")
|
||||
}
|
||||
}
|
||||
}
|
|
@ -8,12 +8,9 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
parseUrl "net/url"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/utils"
|
||||
"golang.org/x/crypto/sha3" //nolint:gosec
|
||||
)
|
||||
|
||||
|
@ -27,7 +24,6 @@ type Sync struct {
|
|||
AuthHeader string
|
||||
Interval uint32
|
||||
ready bool
|
||||
eTag string
|
||||
}
|
||||
|
||||
// Client defines the behaviour required of a http client
|
||||
|
@ -43,11 +39,11 @@ type Cron interface {
|
|||
}
|
||||
|
||||
func (hs *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
msg, _, err := hs.fetchBody(ctx, true)
|
||||
msg, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI, Type: sync.ALL}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -64,7 +60,7 @@ func (hs *Sync) IsReady() bool {
|
|||
|
||||
func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
// Initial fetch
|
||||
fetch, _, err := hs.fetchBody(ctx, true)
|
||||
fetch, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -75,30 +71,43 @@ func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
hs.Logger.Debug(fmt.Sprintf("polling %s every %d seconds", hs.URI, hs.Interval))
|
||||
_ = hs.Cron.AddFunc(fmt.Sprintf("*/%d * * * *", hs.Interval), func() {
|
||||
hs.Logger.Debug(fmt.Sprintf("fetching configuration from %s", hs.URI))
|
||||
previousBodySHA := hs.LastBodySHA
|
||||
body, noChange, err := hs.fetchBody(ctx, false)
|
||||
body, err := hs.fetchBodyFromURL(ctx, hs.URI)
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error fetching: %s", err.Error()))
|
||||
hs.Logger.Error(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if body == "" && !noChange {
|
||||
if len(body) == 0 {
|
||||
hs.Logger.Debug("configuration deleted")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if hs.LastBodySHA == "" {
|
||||
hs.Logger.Debug("new configuration created")
|
||||
msg, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error fetching: %s", err.Error()))
|
||||
} else {
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI, Type: sync.ALL}
|
||||
}
|
||||
} else {
|
||||
currentSHA := hs.generateSha(body)
|
||||
if hs.LastBodySHA != currentSHA {
|
||||
hs.Logger.Debug("configuration modified")
|
||||
msg, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error fetching: %s", err.Error()))
|
||||
} else {
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI, Type: sync.ALL}
|
||||
}
|
||||
}
|
||||
|
||||
if previousBodySHA == "" {
|
||||
hs.Logger.Debug("configuration created")
|
||||
dataSync <- sync.DataSync{FlagData: body, Source: hs.URI}
|
||||
} else if previousBodySHA != hs.LastBodySHA {
|
||||
hs.Logger.Debug("configuration updated")
|
||||
dataSync <- sync.DataSync{FlagData: body, Source: hs.URI}
|
||||
hs.LastBodySHA = currentSHA
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
hs.Cron.Start()
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: hs.URI}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: hs.URI, Type: sync.ALL}
|
||||
|
||||
<-ctx.Done()
|
||||
hs.Cron.Stop()
|
||||
|
@ -106,18 +115,13 @@ func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (hs *Sync) fetchBody(ctx context.Context, fetchAll bool) (string, bool, error) {
|
||||
if hs.URI == "" {
|
||||
return "", false, errors.New("no HTTP URL string set")
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", hs.URI, bytes.NewBuffer(nil))
|
||||
func (hs *Sync) fetchBodyFromURL(ctx context.Context, url string) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, bytes.NewBuffer(nil))
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("error creating request for url %s: %w", hs.URI, err)
|
||||
return nil, fmt.Errorf("error creating request for url %s: %w", url, err)
|
||||
}
|
||||
|
||||
req.Header.Add("Accept", "application/json")
|
||||
req.Header.Add("Accept", "application/yaml")
|
||||
|
||||
if hs.AuthHeader != "" {
|
||||
req.Header.Set("Authorization", hs.AuthHeader)
|
||||
|
@ -126,60 +130,23 @@ func (hs *Sync) fetchBody(ctx context.Context, fetchAll bool) (string, bool, err
|
|||
req.Header.Set("Authorization", bearer)
|
||||
}
|
||||
|
||||
if hs.eTag != "" && !fetchAll {
|
||||
req.Header.Set("If-None-Match", hs.eTag)
|
||||
}
|
||||
|
||||
resp, err := hs.Client.Do(req)
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("error calling endpoint %s: %w", hs.URI, err)
|
||||
return nil, fmt.Errorf("error calling endpoint %s: %w", url, err)
|
||||
}
|
||||
defer func() {
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error closing the response body: %s", err.Error()))
|
||||
hs.Logger.Debug(fmt.Sprintf("error closing the response body: %s", err.Error()))
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode == 304 {
|
||||
hs.Logger.Debug("no changes detected")
|
||||
return "", true, nil
|
||||
}
|
||||
|
||||
statusOK := resp.StatusCode >= 200 && resp.StatusCode < 300
|
||||
if !statusOK {
|
||||
return "", false, fmt.Errorf("error fetching from url %s: %s", hs.URI, resp.Status)
|
||||
}
|
||||
|
||||
if resp.Header.Get("ETag") != "" {
|
||||
hs.eTag = resp.Header.Get("ETag")
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("unable to read body to bytes: %w", err)
|
||||
return nil, fmt.Errorf("unable to read body to bytes: %w", err)
|
||||
}
|
||||
|
||||
json, err := utils.ConvertToJSON(body, getFileExtensions(hs.URI), resp.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("error converting response body to json: %w", err)
|
||||
}
|
||||
|
||||
if json != "" {
|
||||
hs.LastBodySHA = hs.generateSha([]byte(body))
|
||||
}
|
||||
|
||||
return json, false, nil
|
||||
}
|
||||
|
||||
// getFileExtensions returns the file extension from the URL path
|
||||
func getFileExtensions(url string) string {
|
||||
u, err := parseUrl.Parse(url)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return filepath.Ext(u.Path)
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func (hs *Sync) generateSha(body []byte) string {
|
||||
|
@ -189,6 +156,17 @@ func (hs *Sync) generateSha(body []byte) string {
|
|||
}
|
||||
|
||||
func (hs *Sync) Fetch(ctx context.Context) (string, error) {
|
||||
body, _, err := hs.fetchBody(ctx, false)
|
||||
return body, err
|
||||
if hs.URI == "" {
|
||||
return "", errors.New("no HTTP URL string set")
|
||||
}
|
||||
|
||||
body, err := hs.fetchBodyFromURL(ctx, hs.URI)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(body) != 0 {
|
||||
hs.LastBodySHA = hs.generateSha(body)
|
||||
}
|
||||
|
||||
return string(body), nil
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -12,29 +13,24 @@ import (
|
|||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
syncmock "github.com/open-feature/flagd/core/pkg/sync/http/mock"
|
||||
synctesting "github.com/open-feature/flagd/core/pkg/sync/testing"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestSimpleSync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(_ string, _ func()) error {
|
||||
resp := "test response"
|
||||
|
||||
mockCron := syncmock.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(spec string, cmd func()) error {
|
||||
return nil
|
||||
})
|
||||
mockCron.EXPECT().Start().Times(1)
|
||||
|
||||
mockClient := syncmock.NewMockClient(ctrl)
|
||||
responseBody := "test response"
|
||||
resp := &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader(responseBody)),
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
mockClient.EXPECT().Do(gomock.Any()).Return(resp, nil)
|
||||
mockClient.EXPECT().Do(gomock.Any()).Return(&http.Response{Body: io.NopCloser(strings.NewReader(resp))}, nil)
|
||||
|
||||
httpSync := Sync{
|
||||
URI: "http://localhost/flags",
|
||||
URI: "http://localhost",
|
||||
Client: mockClient,
|
||||
Cron: mockCron,
|
||||
LastBodySHA: "",
|
||||
|
@ -54,51 +50,8 @@ func TestSimpleSync(t *testing.T) {
|
|||
|
||||
data := <-dataSyncChan
|
||||
|
||||
if data.FlagData != responseBody {
|
||||
t.Errorf("expected content: %s, but received content: %s", responseBody, data.FlagData)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionWithQSSync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(_ string, _ func()) error {
|
||||
return nil
|
||||
})
|
||||
mockCron.EXPECT().Start().Times(1)
|
||||
|
||||
mockClient := syncmock.NewMockClient(ctrl)
|
||||
responseBody := "test response"
|
||||
resp := &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader(responseBody)),
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
mockClient.EXPECT().Do(gomock.Any()).Return(resp, nil)
|
||||
|
||||
httpSync := Sync{
|
||||
URI: "http://localhost/flags.json?env=dev",
|
||||
Client: mockClient,
|
||||
Cron: mockCron,
|
||||
LastBodySHA: "",
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
dataSyncChan := make(chan sync.DataSync)
|
||||
|
||||
go func() {
|
||||
err := httpSync.Sync(ctx, dataSyncChan)
|
||||
if err != nil {
|
||||
log.Fatalf("Error start sync: %s", err.Error())
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
data := <-dataSyncChan
|
||||
|
||||
if data.FlagData != responseBody {
|
||||
t.Errorf("expected content: %s, but received content: %s", responseBody, data.FlagData)
|
||||
if data.FlagData != resp {
|
||||
t.Errorf("expected content: %s, but received content: %s", resp, data.FlagData)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -110,16 +63,13 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
uri string
|
||||
bearerToken string
|
||||
authHeader string
|
||||
eTagHeader string
|
||||
lastBodySHA string
|
||||
handleResponse func(*testing.T, Sync, string, error)
|
||||
}{
|
||||
"success": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
}, nil)
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -134,19 +84,17 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"return an error if no uri": {
|
||||
setup: func(_ *testing.T, _ *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, _ string, err error) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, fetched string, err error) {
|
||||
if err == nil {
|
||||
t.Error("expected err, got nil")
|
||||
}
|
||||
},
|
||||
},
|
||||
"update last body sha": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
}, nil)
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -172,11 +120,7 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
if actualAuthHeader != "Bearer "+expectedToken {
|
||||
t.Fatalf("expected Authorization header to be 'Bearer %s', got %s", expectedToken, actualAuthHeader)
|
||||
}
|
||||
return &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil
|
||||
return &http.Response{Body: io.NopCloser(strings.NewReader("test response"))}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -203,11 +147,7 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
if actualAuthHeader != expectedHeader {
|
||||
t.Fatalf("expected Authorization header to be '%s', got %s", expectedHeader, actualAuthHeader)
|
||||
}
|
||||
return &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil
|
||||
return &http.Response{Body: io.NopCloser(strings.NewReader("test response"))}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -226,100 +166,6 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
}
|
||||
},
|
||||
},
|
||||
"unauthorized request": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusUnauthorized,
|
||||
}, nil)
|
||||
},
|
||||
uri: "http://localhost",
|
||||
handleResponse: func(t *testing.T, _ Sync, _ string, err error) {
|
||||
if err == nil {
|
||||
t.Fatalf("expected unauthorized request to return an error")
|
||||
}
|
||||
},
|
||||
},
|
||||
"not modified response etag matched": {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
expectedIfNoneMatch := `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`
|
||||
client.EXPECT().Do(gomock.Any()).DoAndReturn(func(req *http.Request) (*http.Response, error) {
|
||||
actualIfNoneMatch := req.Header.Get("If-None-Match")
|
||||
if actualIfNoneMatch != expectedIfNoneMatch {
|
||||
t.Fatalf("expected If-None-Match header to be '%s', got %s", expectedIfNoneMatch, actualIfNoneMatch)
|
||||
}
|
||||
return &http.Response{
|
||||
Header: map[string][]string{"ETag": {expectedIfNoneMatch}},
|
||||
Body: io.NopCloser(strings.NewReader("")),
|
||||
StatusCode: http.StatusNotModified,
|
||||
}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
eTagHeader: `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`,
|
||||
handleResponse: func(t *testing.T, httpSync Sync, _ string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("fetch: %v", err)
|
||||
}
|
||||
|
||||
expectedLastBodySHA := ""
|
||||
expectedETag := `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`
|
||||
if httpSync.LastBodySHA != expectedLastBodySHA {
|
||||
t.Errorf(
|
||||
"expected last body sha to be: '%s', got: '%s'", expectedLastBodySHA, httpSync.LastBodySHA,
|
||||
)
|
||||
}
|
||||
if httpSync.eTag != expectedETag {
|
||||
t.Errorf(
|
||||
"expected last etag to be: '%s', got: '%s'", expectedETag, httpSync.eTag,
|
||||
)
|
||||
}
|
||||
},
|
||||
},
|
||||
"modified response etag mismatched": {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
expectedIfNoneMatch := `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`
|
||||
client.EXPECT().Do(gomock.Any()).DoAndReturn(func(req *http.Request) (*http.Response, error) {
|
||||
actualIfNoneMatch := req.Header.Get("If-None-Match")
|
||||
if actualIfNoneMatch != expectedIfNoneMatch {
|
||||
t.Fatalf("expected If-None-Match header to be '%s', got %s", expectedIfNoneMatch, actualIfNoneMatch)
|
||||
}
|
||||
|
||||
newContent := "\"Hey there!\""
|
||||
newETag := `"c2e01ce63d90109c4c7f4f6dcea97ed1bb2b51e3647f36caf5acbe27413a24bb"`
|
||||
|
||||
return &http.Response{
|
||||
Header: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Etag": {newETag},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader(newContent)),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
eTagHeader: `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`,
|
||||
handleResponse: func(t *testing.T, httpSync Sync, _ string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("fetch: %v", err)
|
||||
}
|
||||
|
||||
expectedLastBodySHA := "wuAc5j2QEJxMf09tzql-0bsrUeNkfzbK9ay-J0E6JLs="
|
||||
expectedETag := `"c2e01ce63d90109c4c7f4f6dcea97ed1bb2b51e3647f36caf5acbe27413a24bb"`
|
||||
if httpSync.LastBodySHA != expectedLastBodySHA {
|
||||
t.Errorf(
|
||||
"expected last body sha to be: '%s', got: '%s'", expectedLastBodySHA, httpSync.LastBodySHA,
|
||||
)
|
||||
}
|
||||
if httpSync.eTag != expectedETag {
|
||||
t.Errorf(
|
||||
"expected last etag to be: '%s', got: '%s'", expectedETag, httpSync.eTag,
|
||||
)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
|
@ -335,7 +181,6 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
AuthHeader: tt.authHeader,
|
||||
LastBodySHA: tt.lastBodySHA,
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
eTag: tt.eTagHeader,
|
||||
}
|
||||
|
||||
fetched, err := httpSync.Fetch(context.Background())
|
||||
|
@ -369,8 +214,6 @@ func TestSync_Init(t *testing.T) {
|
|||
|
||||
func TestHTTPSync_Resync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
source := "http://localhost"
|
||||
emptyFlagData := "{}"
|
||||
|
||||
tests := map[string]struct {
|
||||
setup func(t *testing.T, client *syncmock.MockClient)
|
||||
|
@ -382,14 +225,12 @@ func TestHTTPSync_Resync(t *testing.T) {
|
|||
wantNotifications []sync.DataSync
|
||||
}{
|
||||
"success": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader(emptyFlagData)),
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
}, nil)
|
||||
},
|
||||
uri: source,
|
||||
uri: "http://localhost",
|
||||
handleResponse: func(t *testing.T, _ Sync, fetched string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("fetch: %v", err)
|
||||
|
@ -402,14 +243,15 @@ func TestHTTPSync_Resync(t *testing.T) {
|
|||
wantErr: false,
|
||||
wantNotifications: []sync.DataSync{
|
||||
{
|
||||
FlagData: emptyFlagData,
|
||||
Source: source,
|
||||
Type: sync.ALL,
|
||||
FlagData: "",
|
||||
Source: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
"error response": {
|
||||
setup: func(_ *testing.T, _ *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, _ string, err error) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, fetched string, err error) {
|
||||
if err == nil {
|
||||
t.Error("expected err, got nil")
|
||||
}
|
||||
|
@ -445,8 +287,8 @@ func TestHTTPSync_Resync(t *testing.T) {
|
|||
for _, dataSync := range tt.wantNotifications {
|
||||
select {
|
||||
case x := <-d:
|
||||
if x.FlagData != dataSync.FlagData || x.Source != dataSync.Source {
|
||||
t.Errorf("unexpected datasync received %v vs %v", x, dataSync)
|
||||
if !reflect.DeepEqual(x.String(), dataSync.String()) {
|
||||
t.Error("unexpected datasync received", x, dataSync)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("expected datasync not received", dataSync)
|
||||
|
|
|
@ -53,3 +53,64 @@ func (mr *MockClientMockRecorder) Do(req any) *gomock.Call {
|
|||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockClient)(nil).Do), req)
|
||||
}
|
||||
|
||||
// MockCron is a mock of Cron interface.
|
||||
type MockCron struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockCronMockRecorder
|
||||
}
|
||||
|
||||
// MockCronMockRecorder is the mock recorder for MockCron.
|
||||
type MockCronMockRecorder struct {
|
||||
mock *MockCron
|
||||
}
|
||||
|
||||
// NewMockCron creates a new mock instance.
|
||||
func NewMockCron(ctrl *gomock.Controller) *MockCron {
|
||||
mock := &MockCron{ctrl: ctrl}
|
||||
mock.recorder = &MockCronMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockCron) EXPECT() *MockCronMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// AddFunc mocks base method.
|
||||
func (m *MockCron) AddFunc(spec string, cmd func()) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "AddFunc", spec, cmd)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// AddFunc indicates an expected call of AddFunc.
|
||||
func (mr *MockCronMockRecorder) AddFunc(spec, cmd any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddFunc", reflect.TypeOf((*MockCron)(nil).AddFunc), spec, cmd)
|
||||
}
|
||||
|
||||
// Start mocks base method.
|
||||
func (m *MockCron) Start() {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Start")
|
||||
}
|
||||
|
||||
// Start indicates an expected call of Start.
|
||||
func (mr *MockCronMockRecorder) Start() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockCron)(nil).Start))
|
||||
}
|
||||
|
||||
// Stop mocks base method.
|
||||
func (m *MockCron) Stop() {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Stop")
|
||||
}
|
||||
|
||||
// Stop indicates an expected call of Stop.
|
||||
func (mr *MockCronMockRecorder) Stop() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockCron)(nil).Stop))
|
||||
}
|
||||
|
|
|
@ -2,10 +2,37 @@ package sync
|
|||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
)
|
||||
|
||||
type Type int
|
||||
|
||||
// Type of the sync operation
|
||||
const (
|
||||
// ALL - All flags of sync provider. This is the default if unset due to primitive default
|
||||
ALL Type = iota
|
||||
// ADD - Additional flags from sync provider
|
||||
ADD
|
||||
// UPDATE - Update for flag(s) previously provided
|
||||
UPDATE
|
||||
// DELETE - Delete for flag(s) previously provided
|
||||
DELETE
|
||||
)
|
||||
|
||||
func (t Type) String() string {
|
||||
switch t {
|
||||
case ALL:
|
||||
return "ALL"
|
||||
case ADD:
|
||||
return "ADD"
|
||||
case UPDATE:
|
||||
return "UPDATE"
|
||||
case DELETE:
|
||||
return "DELETE"
|
||||
default:
|
||||
return "UNKNOWN"
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
ISync implementations watch for changes in the flag sources (HTTP backend, local file, K8s CRDs ...),fetch the latest
|
||||
value and communicate to the Runtime with DataSync channel
|
||||
|
@ -28,10 +55,10 @@ type ISync interface {
|
|||
|
||||
// DataSync is the data contract between Runtime and sync implementations
|
||||
type DataSync struct {
|
||||
FlagData string
|
||||
SyncContext *structpb.Struct
|
||||
Source string
|
||||
Selector string
|
||||
FlagData string
|
||||
Source string
|
||||
Selector string
|
||||
Type
|
||||
}
|
||||
|
||||
// SourceConfig is configuration option for flagd. This maps to startup parameter sources
|
||||
|
@ -46,5 +73,4 @@ type SourceConfig struct {
|
|||
ProviderID string `json:"providerID,omitempty"`
|
||||
Selector string `json:"selector,omitempty"`
|
||||
Interval uint32 `json:"interval,omitempty"`
|
||||
MaxMsgSize int `json:"maxMsgSize,omitempty"`
|
||||
}
|
||||
|
|
|
@ -57,7 +57,7 @@ func (k *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error
|
|||
if err != nil {
|
||||
return fmt.Errorf("unable to fetch flag configuration: %w", err)
|
||||
}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI, Type: sync.ALL}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,7 @@ func (k *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
return err
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI, Type: sync.ALL}
|
||||
|
||||
notifies := make(chan INotify)
|
||||
|
||||
|
@ -136,7 +136,7 @@ func (k *Sync) watcher(ctx context.Context, notifies chan INotify, dataSync chan
|
|||
continue
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI, Type: sync.ALL}
|
||||
case DefaultEventTypeModify:
|
||||
k.logger.Debug("Configuration modified")
|
||||
msg, err := k.fetch(ctx)
|
||||
|
@ -145,7 +145,7 @@ func (k *Sync) watcher(ctx context.Context, notifies chan INotify, dataSync chan
|
|||
continue
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI, Type: sync.ALL}
|
||||
case DefaultEventTypeDelete:
|
||||
k.logger.Debug("configuration deleted")
|
||||
case DefaultEventTypeReady:
|
||||
|
|
|
@ -607,7 +607,6 @@ func TestInit(t *testing.T) {
|
|||
func TestSync_ReSync(t *testing.T) {
|
||||
const name = "myFF"
|
||||
const ns = "myNS"
|
||||
const payload = "{\"flags\":null}"
|
||||
s := runtime.NewScheme()
|
||||
ff := &unstructured.Unstructured{}
|
||||
ff.SetUnstructuredContent(getCFG(name, ns))
|
||||
|
@ -669,8 +668,8 @@ func TestSync_ReSync(t *testing.T) {
|
|||
i := tt.countMsg
|
||||
for i > 0 {
|
||||
d := <-dataChannel
|
||||
if d.FlagData != payload {
|
||||
t.Errorf("Expected %v, got %v", payload, d.FlagData)
|
||||
if d.Type != sync.ALL {
|
||||
t.Errorf("Expected %v, got %v", sync.ALL, d)
|
||||
}
|
||||
i--
|
||||
}
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
package testing
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
// MockCron is a mock of Cron interface.
|
||||
type MockCron struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockCronMockRecorder
|
||||
cmd func()
|
||||
}
|
||||
|
||||
// MockCronMockRecorder is the mock recorder for MockCron.
|
||||
type MockCronMockRecorder struct {
|
||||
mock *MockCron
|
||||
}
|
||||
|
||||
// NewMockCron creates a new mock instance.
|
||||
func NewMockCron(ctrl *gomock.Controller) *MockCron {
|
||||
mock := &MockCron{ctrl: ctrl}
|
||||
mock.recorder = &MockCronMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockCron) EXPECT() *MockCronMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// AddFunc mocks base method.
|
||||
func (m *MockCron) AddFunc(spec string, cmd func()) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "AddFunc", spec, cmd)
|
||||
ret0, _ := ret[0].(error)
|
||||
m.cmd = cmd
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (m *MockCron) Tick() {
|
||||
m.cmd()
|
||||
}
|
||||
|
||||
// AddFunc indicates an expected call of AddFunc.
|
||||
func (mr *MockCronMockRecorder) AddFunc(spec, cmd any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddFunc", reflect.TypeOf((*MockCron)(nil).AddFunc), spec, cmd)
|
||||
}
|
||||
|
||||
// Start mocks base method.
|
||||
func (m *MockCron) Start() {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Start")
|
||||
}
|
||||
|
||||
// Start indicates an expected call of Start.
|
||||
func (mr *MockCronMockRecorder) Start() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockCron)(nil).Start))
|
||||
}
|
||||
|
||||
// Stop mocks base method.
|
||||
func (m *MockCron) Stop() {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Stop")
|
||||
}
|
||||
|
||||
// Stop indicates an expected call of Stop.
|
||||
func (mr *MockCronMockRecorder) Stop() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockCron)(nil).Stop))
|
||||
}
|
|
@ -2,15 +2,11 @@ package telemetry
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"connectrpc.com/connect"
|
||||
"connectrpc.com/otelconnect"
|
||||
"github.com/open-feature/flagd/core/pkg/certreloader"
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
@ -21,29 +17,21 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
const (
|
||||
metricsExporterOtel = "otel"
|
||||
exportInterval = 2 * time.Second
|
||||
)
|
||||
|
||||
type CollectorConfig struct {
|
||||
Target string
|
||||
CertPath string
|
||||
KeyPath string
|
||||
ReloadInterval time.Duration
|
||||
CAPath string
|
||||
}
|
||||
|
||||
// Config of the telemetry runtime. These are expected to be mapped to start-up arguments
|
||||
type Config struct {
|
||||
MetricsExporter string
|
||||
CollectorConfig CollectorConfig
|
||||
CollectorTarget string
|
||||
}
|
||||
|
||||
func RegisterErrorHandling(log *logger.Logger) {
|
||||
|
@ -76,13 +64,13 @@ func BuildMetricsRecorder(
|
|||
// provide the grpc collector target. Providing empty target results in skipping provider & propagator registration.
|
||||
// This results in tracers having NoopTracerProvider and propagator having No-Op TextMapPropagator performing no action
|
||||
func BuildTraceProvider(ctx context.Context, logger *logger.Logger, svc string, svcVersion string, cfg Config) error {
|
||||
if cfg.CollectorConfig.Target == "" {
|
||||
if cfg.CollectorTarget == "" {
|
||||
logger.Debug("skipping trace provider setup as collector target is not set." +
|
||||
" Traces will use NoopTracerProvider provider and propagator will use no-Op TextMapPropagator")
|
||||
return nil
|
||||
}
|
||||
|
||||
exporter, err := buildOtlpExporter(ctx, cfg.CollectorConfig)
|
||||
exporter, err := buildOtlpExporter(ctx, cfg.CollectorTarget)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -107,7 +95,7 @@ func BuildConnectOptions(cfg Config) ([]connect.HandlerOption, error) {
|
|||
options := []connect.HandlerOption{}
|
||||
|
||||
// add interceptor if configuration is available for collector
|
||||
if cfg.CollectorConfig.Target != "" {
|
||||
if cfg.CollectorTarget != "" {
|
||||
interceptor, err := otelconnect.NewInterceptor(otelconnect.WithTrustRemote())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating interceptor, %w", err)
|
||||
|
@ -119,47 +107,6 @@ func BuildConnectOptions(cfg Config) ([]connect.HandlerOption, error) {
|
|||
return options, nil
|
||||
}
|
||||
|
||||
func buildTransportCredentials(_ context.Context, cfg CollectorConfig) (credentials.TransportCredentials, error) {
|
||||
creds := insecure.NewCredentials()
|
||||
if cfg.KeyPath != "" || cfg.CertPath != "" || cfg.CAPath != "" {
|
||||
capool := x509.NewCertPool()
|
||||
if cfg.CAPath != "" {
|
||||
ca, err := os.ReadFile(cfg.CAPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read ca file from %s", cfg.CAPath)
|
||||
}
|
||||
if !capool.AppendCertsFromPEM(ca) {
|
||||
return nil, fmt.Errorf("can't add CA '%s' to pool", cfg.CAPath)
|
||||
}
|
||||
}
|
||||
|
||||
reloader, err := certreloader.NewCertReloader(certreloader.Config{
|
||||
KeyPath: cfg.KeyPath,
|
||||
CertPath: cfg.CertPath,
|
||||
ReloadInterval: cfg.ReloadInterval,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create certreloader: %w", err)
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
RootCAs: capool,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
GetCertificate: func(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
certs, err := reloader.GetCertificate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reload certs: %w", err)
|
||||
}
|
||||
return certs, nil
|
||||
},
|
||||
}
|
||||
|
||||
creds = credentials.NewTLS(tlsConfig)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// buildMetricReader builds a metric reader based on provided configurations
|
||||
func buildMetricReader(ctx context.Context, cfg Config) (metric.Reader, error) {
|
||||
if cfg.MetricsExporter == "" {
|
||||
|
@ -173,18 +120,13 @@ func buildMetricReader(ctx context.Context, cfg Config) (metric.Reader, error) {
|
|||
}
|
||||
|
||||
// Otel override require target configuration
|
||||
if cfg.CollectorConfig.Target == "" {
|
||||
if cfg.CollectorTarget == "" {
|
||||
return nil, fmt.Errorf("metric exporter is set(%s) without providing otel collector target."+
|
||||
" collector target is required for this option", cfg.MetricsExporter)
|
||||
}
|
||||
|
||||
transportCredentials, err := buildTransportCredentials(ctx, cfg.CollectorConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("metric export would not build transport credentials: %w", err)
|
||||
}
|
||||
|
||||
// Non-blocking, insecure grpc connection
|
||||
conn, err := grpc.NewClient(cfg.CollectorConfig.Target, grpc.WithTransportCredentials(transportCredentials))
|
||||
conn, err := grpc.NewClient(cfg.CollectorTarget, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating client connection: %w", err)
|
||||
}
|
||||
|
@ -195,18 +137,13 @@ func buildMetricReader(ctx context.Context, cfg Config) (metric.Reader, error) {
|
|||
return nil, fmt.Errorf("error creating otel metric exporter: %w", err)
|
||||
}
|
||||
|
||||
return metric.NewPeriodicReader(otelExporter), nil
|
||||
return metric.NewPeriodicReader(otelExporter, metric.WithInterval(exportInterval)), nil
|
||||
}
|
||||
|
||||
// buildOtlpExporter is a helper to build grpc backed otlp trace exporter
|
||||
func buildOtlpExporter(ctx context.Context, cfg CollectorConfig) (*otlptrace.Exporter, error) {
|
||||
transportCredentials, err := buildTransportCredentials(ctx, cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("metric export would not build transport credentials: %w", err)
|
||||
}
|
||||
|
||||
// Non-blocking, grpc connection
|
||||
conn, err := grpc.NewClient(cfg.Target, grpc.WithTransportCredentials(transportCredentials))
|
||||
func buildOtlpExporter(ctx context.Context, collectorTarget string) (*otlptrace.Exporter, error) {
|
||||
// Non-blocking, insecure grpc connection
|
||||
conn, err := grpc.NewClient(collectorTarget, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating client connection: %w", err)
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zaptest/observer"
|
||||
)
|
||||
|
@ -21,9 +21,7 @@ func TestBuildMetricsRecorder(t *testing.T) {
|
|||
// Simple happy-path test
|
||||
recorder, err := BuildMetricsRecorder(context.Background(), "service", "0.0.1", Config{
|
||||
MetricsExporter: "otel",
|
||||
CollectorConfig: CollectorConfig{
|
||||
Target: "localhost:8080",
|
||||
},
|
||||
CollectorTarget: "localhost:8080",
|
||||
})
|
||||
|
||||
require.Nil(t, err, "expected no error, but got: %v", err)
|
||||
|
@ -54,9 +52,7 @@ func TestBuildMetricReader(t *testing.T) {
|
|||
name: "Metric exporter overriding require valid configuration combination",
|
||||
cfg: Config{
|
||||
MetricsExporter: metricsExporterOtel,
|
||||
CollectorConfig: CollectorConfig{
|
||||
Target: "", // collector target is unset
|
||||
},
|
||||
CollectorTarget: "", // collector target is unset
|
||||
},
|
||||
error: true,
|
||||
},
|
||||
|
@ -64,9 +60,7 @@ func TestBuildMetricReader(t *testing.T) {
|
|||
name: "Metric exporter overriding with valid configurations",
|
||||
cfg: Config{
|
||||
MetricsExporter: metricsExporterOtel,
|
||||
CollectorConfig: CollectorConfig{
|
||||
Target: "localhost:8080",
|
||||
},
|
||||
CollectorTarget: "localhost:8080",
|
||||
},
|
||||
error: false,
|
||||
},
|
||||
|
@ -96,9 +90,7 @@ func TestBuildSpanProcessor(t *testing.T) {
|
|||
{
|
||||
name: "Valid configurations yield a valid processor",
|
||||
cfg: Config{
|
||||
CollectorConfig: CollectorConfig{
|
||||
Target: "localhost:8080",
|
||||
},
|
||||
CollectorTarget: "localhost:8080",
|
||||
},
|
||||
error: false,
|
||||
},
|
||||
|
@ -135,9 +127,7 @@ func TestBuildConnectOptions(t *testing.T) {
|
|||
{
|
||||
name: "Connect option is set when telemetry target is set",
|
||||
cfg: Config{
|
||||
CollectorConfig: CollectorConfig{
|
||||
Target: "localhost:8080",
|
||||
},
|
||||
CollectorTarget: "localhost:8080",
|
||||
},
|
||||
optionCount: 1,
|
||||
},
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
msdk "go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -19,15 +19,15 @@ const (
|
|||
FeatureFlagReasonKey = attribute.Key("feature_flag.reason")
|
||||
ExceptionTypeKey = attribute.Key("ExceptionTypeKeyName")
|
||||
|
||||
httpRequestDurationMetric = "http.server.request.duration"
|
||||
httpResponseSizeMetric = "http.server.response.body.size"
|
||||
httpRequestDurationMetric = "http.server.duration"
|
||||
httpResponseSizeMetric = "http.server.response.size"
|
||||
httpActiveRequestsMetric = "http.server.active_requests"
|
||||
impressionMetric = "feature_flag." + ProviderName + ".impression"
|
||||
reasonMetric = "feature_flag." + ProviderName + ".result.reason"
|
||||
reasonMetric = "feature_flag." + ProviderName + ".evaluation.reason"
|
||||
)
|
||||
|
||||
type IMetricsRecorder interface {
|
||||
HTTPAttributes(svcName, url, method, code, scheme string) []attribute.KeyValue
|
||||
HTTPAttributes(svcName, url, method, code string) []attribute.KeyValue
|
||||
HTTPRequestDuration(ctx context.Context, duration time.Duration, attrs []attribute.KeyValue)
|
||||
HTTPResponseSize(ctx context.Context, sizeBytes int64, attrs []attribute.KeyValue)
|
||||
InFlightRequestStart(ctx context.Context, attrs []attribute.KeyValue)
|
||||
|
@ -38,7 +38,7 @@ type IMetricsRecorder interface {
|
|||
|
||||
type NoopMetricsRecorder struct{}
|
||||
|
||||
func (NoopMetricsRecorder) HTTPAttributes(_, _, _, _, _ string) []attribute.KeyValue {
|
||||
func (NoopMetricsRecorder) HTTPAttributes(_, _, _, _ string) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{}
|
||||
}
|
||||
|
||||
|
@ -68,13 +68,12 @@ type MetricsRecorder struct {
|
|||
reasons metric.Int64Counter
|
||||
}
|
||||
|
||||
func (r MetricsRecorder) HTTPAttributes(svcName, url, method, code, scheme string) []attribute.KeyValue {
|
||||
func (r MetricsRecorder) HTTPAttributes(svcName, url, method, code string) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String(svcName),
|
||||
semconv.HTTPRouteKey.String(url),
|
||||
semconv.HTTPRequestMethodKey.String(method),
|
||||
semconv.HTTPResponseStatusCodeKey.String(code),
|
||||
semconv.URLSchemeKey.String(scheme),
|
||||
semconv.HTTPURLKey.String(url),
|
||||
semconv.HTTPMethodKey.String(method),
|
||||
semconv.HTTPStatusCodeKey.String(code),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.13.0"
|
||||
)
|
||||
|
||||
const svcName = "mySvc"
|
||||
|
@ -38,10 +38,9 @@ func TestHTTPAttributes(t *testing.T) {
|
|||
},
|
||||
want: []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String(""),
|
||||
semconv.HTTPRouteKey.String(""),
|
||||
semconv.HTTPRequestMethodKey.String(""),
|
||||
semconv.HTTPResponseStatusCodeKey.String(""),
|
||||
semconv.URLSchemeKey.String("http"),
|
||||
semconv.HTTPURLKey.String(""),
|
||||
semconv.HTTPMethodKey.String(""),
|
||||
semconv.HTTPStatusCodeKey.String(""),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -54,10 +53,9 @@ func TestHTTPAttributes(t *testing.T) {
|
|||
},
|
||||
want: []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String("myService"),
|
||||
semconv.HTTPRouteKey.String("#123"),
|
||||
semconv.HTTPRequestMethodKey.String("POST"),
|
||||
semconv.HTTPResponseStatusCodeKey.String("300"),
|
||||
semconv.URLSchemeKey.String("http"),
|
||||
semconv.HTTPURLKey.String("#123"),
|
||||
semconv.HTTPMethodKey.String("POST"),
|
||||
semconv.HTTPStatusCodeKey.String("300"),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -70,17 +68,16 @@ func TestHTTPAttributes(t *testing.T) {
|
|||
},
|
||||
want: []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String("!@#$%^&*()_+|}{[];',./<>"),
|
||||
semconv.HTTPRouteKey.String(""),
|
||||
semconv.HTTPRequestMethodKey.String(""),
|
||||
semconv.HTTPResponseStatusCodeKey.String(""),
|
||||
semconv.URLSchemeKey.String("http"),
|
||||
semconv.HTTPURLKey.String(""),
|
||||
semconv.HTTPMethodKey.String(""),
|
||||
semconv.HTTPStatusCodeKey.String(""),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rec := MetricsRecorder{}
|
||||
res := rec.HTTPAttributes(tt.req.Service, tt.req.ID, tt.req.Method, tt.req.Code, "http")
|
||||
res := rec.HTTPAttributes(tt.req.Service, tt.req.ID, tt.req.Method, tt.req.Code)
|
||||
require.Equal(t, tt.want, res)
|
||||
})
|
||||
}
|
||||
|
@ -211,7 +208,7 @@ func TestMetrics(t *testing.T) {
|
|||
// some really simple tests just to make sure all methods are actually implemented and nothing panics
|
||||
func TestNoopMetricsRecorder_HTTPAttributes(t *testing.T) {
|
||||
no := NoopMetricsRecorder{}
|
||||
got := no.HTTPAttributes("", "", "", "", "")
|
||||
got := no.HTTPAttributes("", "", "", "")
|
||||
require.Empty(t, got)
|
||||
}
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@ package telemetry
|
|||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
// utils contain common utilities to help with telemetry
|
||||
|
@ -14,7 +14,7 @@ const provider = "flagd"
|
|||
func SemConvFeatureFlagAttributes(ffKey string, ffVariant string) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{
|
||||
semconv.FeatureFlagKey(ffKey),
|
||||
semconv.FeatureFlagResultVariant(ffVariant),
|
||||
semconv.FeatureFlagVariant(ffVariant),
|
||||
semconv.FeatureFlagProviderName(provider),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
func TestSemConvFeatureFlagAttributes(t *testing.T) {
|
||||
|
@ -35,7 +35,7 @@ func TestSemConvFeatureFlagAttributes(t *testing.T) {
|
|||
case semconv.FeatureFlagKeyKey:
|
||||
require.Equal(t, test.key, attribute.Value.AsString(),
|
||||
"expected flag key: %s, but received: %s", test.key, attribute.Value.AsString())
|
||||
case semconv.FeatureFlagResultVariantKey:
|
||||
case semconv.FeatureFlagVariantKey:
|
||||
require.Equal(t, test.variant, attribute.Value.AsString(),
|
||||
"expected flag variant: %s, but received %s", test.variant, attribute.Value.AsString())
|
||||
case semconv.FeatureFlagProviderNameKey:
|
||||
|
|
|
@ -1,42 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"mime"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var alphanumericRegex = regexp.MustCompile("[^a-zA-Z0-9]+")
|
||||
|
||||
// ConvertToJSON attempts to convert the content of a file to JSON based on the file extension.
|
||||
// The media type is used as a fallback in case the file extension is not recognized.
|
||||
func ConvertToJSON(data []byte, fileExtension string, mediaType string) (string, error) {
|
||||
var detectedType string
|
||||
if fileExtension != "" {
|
||||
// file extension only contains alphanumeric characters
|
||||
detectedType = alphanumericRegex.ReplaceAllString(fileExtension, "")
|
||||
} else {
|
||||
parsedMediaType, _, err := mime.ParseMediaType(mediaType)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to determine file format: %w", err)
|
||||
}
|
||||
detectedType = parsedMediaType
|
||||
}
|
||||
|
||||
// Normalize the detected type
|
||||
detectedType = strings.ToLower(detectedType)
|
||||
|
||||
switch detectedType {
|
||||
case "yaml", "yml", "application/yaml", "application/x-yaml":
|
||||
str, err := YAMLToJSON(data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error converting blob from yaml to json: %w", err)
|
||||
}
|
||||
return str, nil
|
||||
case "json", "application/json":
|
||||
return string(data), nil
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported file format: '%s'", detectedType)
|
||||
}
|
||||
}
|
|
@ -1,107 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConvertToJSON(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
data []byte
|
||||
fileExtension string
|
||||
mediaType string
|
||||
want string
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
"json file type": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: "json",
|
||||
mediaType: "application/json",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"json file type using in http path": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: ".json/",
|
||||
mediaType: "",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"json file type with encoding": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: "json",
|
||||
mediaType: "application/json; charset=utf-8",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"yaml file type": {
|
||||
data: []byte("flags:\n foo: bar"),
|
||||
fileExtension: "yaml",
|
||||
mediaType: "application/yaml",
|
||||
want: `{"flags":{"foo":"bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"yaml file type with encoding": {
|
||||
data: []byte("flags:\n foo: bar"),
|
||||
fileExtension: "yaml",
|
||||
mediaType: "application/yaml; charset=utf-8",
|
||||
want: `{"flags":{"foo":"bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"yml file type": {
|
||||
data: []byte("flags:\n foo: bar"),
|
||||
fileExtension: "yml",
|
||||
mediaType: "application/x-yaml",
|
||||
want: `{"flags":{"foo":"bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"invalid yaml": {
|
||||
data: []byte("invalid: [yaml: content"),
|
||||
fileExtension: "yaml",
|
||||
mediaType: "application/yaml",
|
||||
wantErr: true,
|
||||
errContains: "error converting blob from yaml to json",
|
||||
},
|
||||
"unsupported file type": {
|
||||
data: []byte("some content"),
|
||||
fileExtension: "txt",
|
||||
mediaType: "text/plain",
|
||||
wantErr: true,
|
||||
errContains: "unsupported file format",
|
||||
},
|
||||
"empty file type with valid media type": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: "",
|
||||
mediaType: "application/json",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"invalid media type": {
|
||||
data: []byte("some content"),
|
||||
fileExtension: "",
|
||||
mediaType: "invalid/\\type",
|
||||
wantErr: true,
|
||||
errContains: "unable to determine file format",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := ConvertToJSON(tt.data, tt.fileExtension, tt.mediaType)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ConvertToJSON() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.wantErr {
|
||||
if err == nil || !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("ConvertToJSON() expected error containing %q, got %v", tt.errContains, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("ConvertToJSON() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// converts YAML byte array to JSON string
|
||||
func YAMLToJSON(rawFile []byte) (string, error) {
|
||||
if len(rawFile) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var ms map[string]interface{}
|
||||
if err := yaml.Unmarshal(rawFile, &ms); err != nil {
|
||||
return "", fmt.Errorf("error unmarshaling yaml: %w", err)
|
||||
}
|
||||
|
||||
r, err := json.Marshal(ms)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling json: %w", err)
|
||||
}
|
||||
|
||||
return string(r), err
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
package utils
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestYAMLToJSON(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input []byte
|
||||
expected string
|
||||
expectedError bool
|
||||
}{
|
||||
"empty": {
|
||||
input: []byte(""),
|
||||
expected: "",
|
||||
expectedError: false,
|
||||
},
|
||||
"simple yaml": {
|
||||
input: []byte("key: value"),
|
||||
expected: `{"key":"value"}`,
|
||||
expectedError: false,
|
||||
},
|
||||
"nested yaml": {
|
||||
input: []byte("parent:\n child: value"),
|
||||
expected: `{"parent":{"child":"value"}}`,
|
||||
expectedError: false,
|
||||
},
|
||||
"invalid yaml": {
|
||||
input: []byte("invalid: yaml: : :"),
|
||||
expectedError: true,
|
||||
},
|
||||
"array yaml": {
|
||||
input: []byte("items:\n - item1\n - item2"),
|
||||
expected: `{"items":["item1","item2"]}`,
|
||||
expectedError: false,
|
||||
},
|
||||
"complex yaml": {
|
||||
input: []byte("bool: true\nnum: 123\nstr: hello\nobj:\n nested: value\narr:\n - 1\n - 2"),
|
||||
expected: `{"arr":[1,2],"bool":true,"num":123,"obj":{"nested":"value"},"str":"hello"}`,
|
||||
expectedError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
output, err := YAMLToJSON(tt.input)
|
||||
|
||||
if tt.expectedError && err == nil {
|
||||
t.Error("expected error but got none")
|
||||
}
|
||||
if !tt.expectedError && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if output != tt.expected {
|
||||
t.Errorf("expected output '%v', got '%v'", tt.expected, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,98 +0,0 @@
|
|||
---
|
||||
# Valid statuses: draft | proposed | rejected | accepted | superseded
|
||||
status: draft
|
||||
author: Your Name
|
||||
created: YYYY-MM-DD
|
||||
updated: YYYY-MM-DD
|
||||
---
|
||||
|
||||
# Title
|
||||
|
||||
<!--
|
||||
This section should be one or two paragraphs that just explains what the goal of this decision is going to be, but without diving too deeply into the "why", "why now", "how", etc.
|
||||
Ensure anyone opening the document will form a clear understanding of the intent from reading this paragraph(s).
|
||||
-->
|
||||
|
||||
## Background
|
||||
|
||||
<!--
|
||||
The next section is the "Background" section. This section should be at least two paragraphs and can take up to a whole page in some cases.
|
||||
The guiding goal of the background section is: as a newcomer to this project (new employee, team transfer), can I read the background section and follow any links to get the full context of why this change is necessary?
|
||||
|
||||
If you can't show a random engineer the background section and have them acquire nearly full context on the necessity for the RFC, then the background section is not full enough. To help achieve this, link to prior RFCs, discussions, and more here as necessary to provide context so you don't have to simply repeat yourself.
|
||||
-->
|
||||
|
||||
## Requirements
|
||||
|
||||
<!--
|
||||
This section outlines the requirements that the proposal must meet.
|
||||
These requirements should be derived from the background section and should be clear, concise, and actionable.
|
||||
This is where you can specify the goals and constraints that the proposal must satisfy.
|
||||
This could include performance metrics, security considerations, user experience goals, and any other relevant criteria.
|
||||
-->
|
||||
* {Requirement 1}
|
||||
* {Requirement 2}
|
||||
* {Requirement 3}
|
||||
* … <!-- numbers of requirements can vary -->
|
||||
|
||||
## Considered Options
|
||||
|
||||
<!--
|
||||
This section lists all the options that were considered for addressing the need outlined in the background section.
|
||||
Each option should be clearly defined with a descriptive title.
|
||||
This provides a comprehensive overview of the solution space that was explored before making a decision.
|
||||
The options will be evaluated in the proposal section, where the chosen approach is justified.
|
||||
-->
|
||||
|
||||
* {title of option 1}
|
||||
* {title of option 2}
|
||||
* {title of option 3}
|
||||
* … <!-- numbers of options can vary -->
|
||||
|
||||
## Proposal
|
||||
|
||||
<!--
|
||||
The next required section is "Proposal" or "Goal".
|
||||
Given the background above, this section proposes a solution.
|
||||
This should be an overview of the "how" for the solution.
|
||||
Include content like diagrams, prototypes, and high-level requirements.
|
||||
-->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
### API changes
|
||||
|
||||
<!--
|
||||
This section should describe any API changes that are part of the proposal.
|
||||
This includes any new endpoints, changes to existing endpoints, or modifications to the data model.
|
||||
It should provide enough detail for developers to understand how the API will evolve and what impact it will have on existing clients.
|
||||
-->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
### Consequences
|
||||
|
||||
* Good, because {positive consequence, e.g., improvement of one or more desired qualities, …}
|
||||
* Bad, because {negative consequence, e.g., compromising one or more desired qualities, …}
|
||||
* … <!-- numbers of consequences can vary -->
|
||||
|
||||
### Timeline
|
||||
|
||||
<!--
|
||||
This section outlines a high level timeline for implementing the proposal.
|
||||
It should include key milestones, deadlines, and any dependencies that need to be addressed.
|
||||
This helps to set expectations for the size of the change and the expected timeline for completion.
|
||||
-->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
### Open questions
|
||||
|
||||
* {Question 1}
|
||||
* … <!-- numbers of question can vary -->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
## More Information
|
||||
|
||||
<!--
|
||||
This section provides additional context, evidence, or documentation to support the decision.
|
||||
Use this space to provide any supplementary information that would be helpful for future readers
|
||||
to fully understand the decision and its implications.
|
||||
-->
|
|
@ -1,81 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @toddbaert
|
||||
created: 2025-05-16
|
||||
updated: --
|
||||
---
|
||||
|
||||
# Adoption of Cucumber/Gherkin for `flagd` Testing Suite
|
||||
|
||||
This decision document outlines the rationale behind adopting the Cucumber/Gherkin testing framework for the `flagd` project’s testing suite. The goal is to establish a clear, maintainable, and language-agnostic approach for writing integration and behavior-driven tests.
|
||||
|
||||
By leveraging Gherkin’s natural language syntax and Cucumber’s mature ecosystem, we aim to improve test clarity and accessibility across teams, enabling both developers and non-developers to contribute to test case development and validation.
|
||||
|
||||
## Background
|
||||
|
||||
`flagd` is an open-source feature flagging engine that forms a core part of the OpenFeature ecosystem. As such, it includes many clients (providers) written in multiple languages and it needs robust, readable, and accessible testing frameworks that allow for scalable behavior-driven testing.
|
||||
|
||||
Previously, test cases for `flagd` providers were written in language-specific test frameworks, which created fragmentation and limited contributions from engineers who weren’t familiar with the language in question. Furthermore, the ability to validate consistent feature flag behavior across multiple SDKs and environments became increasingly important as adoption grew, and in-process evaluation was implemented.
|
||||
|
||||
To address this, the engineering team investigated frameworks that would enable:
|
||||
|
||||
- Behavior-driven development (BDD) to validate consistent flag evaluation behavior, configuration, and provider life-cycle (connection, etc).
|
||||
- High cross-language support to integrate with multiple SDKs and tools.
|
||||
- Ease of use for writing, understanding, enhancing and maintaining tests.
|
||||
|
||||
After evaluating our options and experimenting with prototypes, we adopted Cucumber with Gherkin syntax for our testing strategy.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Must be supported across a wide variety of programming languages.
|
||||
- Must offer mature tooling and documentation.
|
||||
- Must enable the writing of easily understandable, high-level test cases.
|
||||
- Must be open source.
|
||||
- Should support automated integration in CI pipelines.
|
||||
- Should support parameterized and reusable test definitions.
|
||||
|
||||
## Considered Options
|
||||
|
||||
- Adoption of Cucumber/Gherkin e2e testing framework
|
||||
- No cross-implementation e2e testing framework (rely on unit tests)
|
||||
- Custom e2e testing framework, perhaps based on csv or other tabular input/output assertions
|
||||
|
||||
## Proposal
|
||||
|
||||
We adopted the Cucumber testing framework, using Gherkin syntax to define feature specifications and test behaviors. Gherkin offers a structured and readable DSL (domain-specific language) that enables concise expression of feature behaviors in plain English, making test scenarios accessible to both technical and non-technical contributors.
|
||||
|
||||
We use Cucumber’s tooling in combination with language bindings (e.g., Go, JavaScript, Python) to execute these scenarios across different environments and SDKs. Step definitions are implemented using the idiomatic tools of each language, while test scenarios remain shared and version-controlled.
|
||||
|
||||
### API changes
|
||||
|
||||
N/A – this decision does not introduce API-level changes but applies to test infrastructure and development workflow.
|
||||
|
||||
### Consequences
|
||||
|
||||
#### Pros
|
||||
|
||||
- Test scenarios are readable and accessible to a broad range of contributors.
|
||||
- Cucumber and Gherkin are supported in most major programming languages.
|
||||
- Tests are partially decoupled from the underlying implementation language.
|
||||
- Parameterized and reuseable test definitions mean new validations and assertions can often be added in providers without writing any code.
|
||||
|
||||
#### Cons
|
||||
|
||||
- Adding a new framework introduces some complexity and a learning curve.
|
||||
- In some cases/runtimes, debugging failed tests in Gherkin can be more difficult than traditional unit tests.
|
||||
|
||||
### Timeline
|
||||
|
||||
N/A - this is a retrospective document, timeline was not recorded.
|
||||
|
||||
### Open questions
|
||||
|
||||
- Should we enforce Gherkin for all providers?
|
||||
|
||||
## More Information
|
||||
|
||||
- [flagd Testbed Repository](https://github.com/open-feature/flagd-testbed)
|
||||
- [Cucumber Documentation](https://cucumber.io/docs/)
|
||||
- [Gherkin Syntax Guide](https://cucumber.io/docs/gherkin/)
|
||||
- [flagd GitHub Repository](https://github.com/open-feature/flagd)
|
||||
- [OpenFeature Project Overview](https://openfeature.dev/)
|
|
@ -1,77 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @tangenti
|
||||
created: 2025-06-16
|
||||
updated: 2025-06-16
|
||||
---
|
||||
|
||||
# Decouple flag sync sources and flag sets
|
||||
|
||||
The goal is to support dynamic flag sets for flagd providers and decouple sources and flag sets.
|
||||
|
||||
## Background
|
||||
|
||||
Flagd daemon syncs flag configurations from multiple sources. A single source provides a single config, which has an optional flag set ID that may or may not change in the following syncs of the same source.
|
||||
|
||||
The in-process provider uses `selector` to specify the desired source. In order to get a desired flag set, a provider has to stick to a source that provides that flag set. In this case, the flagd daemon cannot remove a source without breaking the dependant flagd providers.
|
||||
|
||||
Assumptions of the current model
|
||||
|
||||
- `flagSetId`s must be unique across different sources or the configuration is considered invalid.
|
||||
- In-process providers request at most one flag set.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Flagd daemon can remove a source without breaking in-process providers that depend on the flag set the source provides.
|
||||
- In-process providers can select based on flag sets.
|
||||
- No breaking changes for the current usage of `selector`
|
||||
|
||||
## Proposal
|
||||
|
||||
### API change
|
||||
|
||||
#### Flag Configuration Schema
|
||||
|
||||
Add an optional field `flagsetID` under `flag` or `flag.metadata`. The flag set ID cannot be specified if a flag set ID is specified for the config.
|
||||
|
||||
### Flagd Sync Selector
|
||||
|
||||
Selector will be extended for generic flags selection, starting with checking the equivalence of `source` and `flagsetID` of flags.
|
||||
|
||||
Example
|
||||
|
||||
```yaml
|
||||
# Flags from the source `override`
|
||||
selector: override
|
||||
|
||||
# Flags from the source `override`
|
||||
selector: source=override
|
||||
|
||||
# Flags from the flag set `project-42`
|
||||
selector: flagsetID=project-42
|
||||
```
|
||||
|
||||
The semantic can later be extended with a more complex design, such as AIP-160 filter or Kubernetes selections. This is out of the scope of this ADR.
|
||||
|
||||
### Flagd Daemon Storage
|
||||
|
||||
1. Flagd will have separate stores for `flags` and `sources`
|
||||
|
||||
2. `selector` will be removed from the store
|
||||
|
||||
3. `flagSetID` will be added as part of `model.Flag` or under `model.Flag.Metadata` for better consistency with the API.
|
||||
|
||||
### Flags Sync
|
||||
|
||||
Sync server would count the extended syntax of `selector` and filter the list of flags on-the-fly answering the requests from the providers.
|
||||
|
||||
The existing conflict resolving based on sources remains the same. Resyncs on removing flags remains unchanged as well.
|
||||
|
||||
## Consequences
|
||||
|
||||
### The good
|
||||
|
||||
- One source can have multiple flag sets.
|
||||
- `selector` works on a more grandular level.
|
||||
- No breaking change
|
||||
- Sync servers and clients now hold the same understanding of the `selector` semantic.
|
|
@ -1,144 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @tangenti
|
||||
created: 2025-06-27
|
||||
updated: 2025-06-27
|
||||
---
|
||||
|
||||
# Support for Duplicate Flag Keys
|
||||
|
||||
This ADR proposes allowing a single sync source to provide multiple flags that share the same key. This enables greater flexibility for modularizing flag configurations.
|
||||
|
||||
## Background
|
||||
|
||||
Currently, the `flagd` [flag configuration](https://flagd.dev/schema/v0/flags.json) stores flags in a JSON object (a map), where each key must be unique. While the JSON specification technically allows duplicate keys, it's not recommended and not well-supported in the implementations.
|
||||
|
||||
This limitation prevents use cases for flag modularization and multi-tenancy, such as:
|
||||
|
||||
- **Component-based Flags:** Two different services, each with its own in-process provider, cannot independently define a flag with the same key when communicating with the same `flagd` daemon.
|
||||
- **Multi-Tenant Targeting:** A single flagd daemon cannot use the same flag key with different targeting rules for different tenants
|
||||
|
||||
## Requirements
|
||||
|
||||
- Allow a single sync source to define multiple flags that have the same key.
|
||||
- Flags from a sync source with the same keys can have different types and targeting rules.
|
||||
- No breaking changes for the current flagd flag configuration schema or flagd sync services.
|
||||
|
||||
## Proposal
|
||||
|
||||
We will update the `flagd` flag configuration schema to support receiving flags as an **array of flag objects**. The existing schema will remain fully supported.
|
||||
|
||||
### API Change
|
||||
|
||||
#### Flag Configuration Schema
|
||||
|
||||
We'll add a new schema as a [subschema](https://json-schema.org/learn/glossary#subschema) to the existing flagd flag configuration schema. It will be a composite of the original schema except `flags` (`#/definitions/base`), with a new schema for `flags` that allows flags array in addition to the currently supported flags object. The existing main schema will be the composite of `#/definitions/base` and the subschema for the flags object.
|
||||
|
||||
```json
|
||||
...
|
||||
"flagsArray": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/flag"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"key": {
|
||||
"description": "Key of the flag",
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"key"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"flagsArraySchema": {
|
||||
"$id": "https://flagd.dev/schema/v0/flags.json#flagsarray",
|
||||
"type": "object",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/base"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"flags": {
|
||||
"oneOf": [
|
||||
{
|
||||
"$ref": "#/definitions/flagsArray"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/flagsMap"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"flags"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
If the config level flag set ID is not specified, `metadata.flagSetId` of each flag will be interpreted as its flag set ID.
|
||||
|
||||
A flag will be uniquely identified by the composite key `(flagKey, flagSetId)`. The following three flags will be considered as three different flags.
|
||||
|
||||
1. `{"flagKey": "enable-feature", "flagSetId": ""}`
|
||||
2. `{"flagKey": "enable-feature", "flagSetId": "default"}`
|
||||
3. `{"flagKey": "enable-feature", "flagSetId": "beta"}`
|
||||
|
||||
### Flagd daemon
|
||||
|
||||
Flagd daemon will perform the JSON schema checks with the reference to `https://flagd.dev/schema/v0/flags.json#flagsarray`, allowing both flags as an object and as an array.
|
||||
|
||||
If the flag array contains two or more flags with the same composite key, the config will be considered as invalid.
|
||||
|
||||
If the request from in-process flagd providers result in a config that has duplicate flag keys, the flagd daemon will only keep one of them in the response.
|
||||
|
||||
### Flagd Daemon Storage
|
||||
|
||||
1. Flagd will have separate stores for `flags` and `sources`.
|
||||
|
||||
1. The `flags` store will use the composite key for flags.
|
||||
|
||||
1. `selector` will be removed from the store
|
||||
|
||||
1. `flagSetId` will be moved from `source` metadata to `flag` metadata.
|
||||
|
||||
### Flags Lifecycle
|
||||
|
||||
Currently, the flags configurations from the latest update of a source will trigger a `sync.ALL` sync. If a flag was presented in the previous configuration but not in the current configuration, it will be removed. In another word, the latest source that provides the config for a flag will take the ownership of a flag, and any subsequent configs are considered as the full states of the flags that are owned by the source.
|
||||
|
||||
We'll keep the same behaviors with this proposal:
|
||||
|
||||
1. If two sources provide the flags with the same composite key, the latest one will be stored.
|
||||
|
||||
2. If a flag from a source no longer presents in the latest configuration of the same source, it will be removed.
|
||||
|
||||
This behavior is less ideal as the ownership management depends on the ordre of the sync. This should be addressed in a separate ADR.
|
||||
|
||||
### Consequences
|
||||
|
||||
#### The good
|
||||
|
||||
- One source can provide flags with the same keys.
|
||||
- Flag set ID no longer bound to a source, so one source can have multiple flag sets.
|
||||
- No breaking change of the API definition and the API behaviors.
|
||||
- No significant change on the flagd stores and how selections work.
|
||||
|
||||
#### The bad
|
||||
|
||||
- The proposal still leverages the concept of flag set in the flagd storage.
|
||||
|
||||
- The schema does not guarantee that flags of the same flag set from the same source will not have the same keys. This is guaranteed in the proposal of #1634.
|
||||
|
||||
- Compared to #1634, this proposal does not allow to define flag set wide metadata.
|
|
@ -1,155 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: Todd Baert
|
||||
created: 2025-06-05
|
||||
updated: 2025-06-05
|
||||
---
|
||||
|
||||
# Flag and Targeting Configuration
|
||||
|
||||
## Background
|
||||
|
||||
Feature flag systems require a flexible, safe, and portable way to express targeting rules that can evaluate contextual data to determine which variant of a feature to serve.
|
||||
|
||||
flagd's targeting system was designed with several key requirements:
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Language agnostic**: Rules must be portable across different programming languages, ideally relying on existing expression language(s)
|
||||
- **Safe evaluation**: No arbitrary code execution or system access
|
||||
- **Deterministic**: Same inputs must always produce same outputs
|
||||
- **Extensible**: Support for the addition of domain-specific operations relevant to feature flags
|
||||
- **Developer and machine friendly**: Human-readable, easily validated, and easily serialized
|
||||
|
||||
## Proposal
|
||||
|
||||
### JSON Logic as the Foundation
|
||||
|
||||
flagd chose **JSON Logic** as its core evaluation engine, implementing a modified version with custom extensions.
|
||||
This provides a secure, portable foundation where rules are expressed as JSON objects with operators as keys and parameters as values.
|
||||
|
||||
#### Benefits realized
|
||||
|
||||
- Rules can be stored in databases, transmitted over networks, shared between frontend/backend, and embedded in Kubernetes custom resources
|
||||
- No eval() or code injection risks - computations are deterministic and sand-boxed
|
||||
- Implementations exist in most languages
|
||||
|
||||
#### Overview
|
||||
|
||||
The system provides two tiers of operators:
|
||||
|
||||
##### Primitive JSON Logic Operators (inherited from JSONLogic)
|
||||
|
||||
- Logical: `and`, `or`, `!`, `!!`
|
||||
- Comparison: `==`, `!=`, `>`, `<`, etc
|
||||
- Arithmetic: `+`, `-`, `*`, `/`, `%`
|
||||
- Array operations: `in`, `map`, `filter`, etc
|
||||
- String operations: `cat`, `substr`, etc
|
||||
- Control flow: `if`
|
||||
- Assignment and extraction: `var`
|
||||
|
||||
##### Custom flagd Extensions
|
||||
|
||||
- `fractional`: Deterministic percentage-based distribution using murmur3 hashing
|
||||
- `starts_with`/`ends_with`: String prefix/suffix matching for common patterns
|
||||
- `sem_ver`: Semantic version comparisons with standard (npm-style) operators
|
||||
- `$ref`: Reference to shared evaluators for DRY principle
|
||||
|
||||
##### Evaluation Context and Automatic Enrichment
|
||||
|
||||
flagd automatically injects critical context values:
|
||||
|
||||
##### System-provided context
|
||||
|
||||
- `$flagd.flagKey`: The flag being evaluated (available v0.6.4+)
|
||||
- `$flagd.timestamp`: Unix timestamp of evaluation (available v0.6.7+)
|
||||
|
||||
This enables sophisticated targeting rules that can reference the flag itself or time-based conditions without requiring client-side context.
|
||||
|
||||
##### Reason Code System for Transparency
|
||||
|
||||
flagd returns specific reason codes with every evaluation to indicate how the decision was made:
|
||||
|
||||
1. **STATIC**: Flag has no targeting rules, and can be safely cached
|
||||
2. **TARGETING_MATCH**: Targeting rules matched and returned a variant
|
||||
3. **DEFAULT**: Targeting rules evaluated to null, fell back to default
|
||||
4. **CACHED**: Value retrieved from provider cache (RPC mode only)
|
||||
5. **ERROR**: Evaluation failed due to invalid configuration
|
||||
|
||||
This transparency enables:
|
||||
|
||||
- Appropriate caching strategies (only STATIC flags are cached)
|
||||
- Improved debugging, telemetry, and monitoring of flag behavior
|
||||
|
||||
##### Shared Evaluators for Reusability
|
||||
|
||||
The `$evaluators` top-level property enables shared targeting logic:
|
||||
|
||||
```json
|
||||
{
|
||||
"$evaluators": {
|
||||
"isEmployee": {
|
||||
"ends_with": [{"var": "email"}, "@company.com"]
|
||||
}
|
||||
},
|
||||
"flags": {
|
||||
"feature-x": {
|
||||
"state": "ENABLED",
|
||||
"defaultVariant": "enabled",
|
||||
"variants": {
|
||||
"enabled": true,
|
||||
"disabled": false
|
||||
},
|
||||
"targeting": {
|
||||
"if": [{"$ref": "isEmployee"}, "enabled", "disabled"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### Intelligent Caching Strategy
|
||||
|
||||
Only flags with reason **STATIC** are cached, as they have deterministic outputs. This ensures:
|
||||
|
||||
- Maximum cache efficiency for simple toggles
|
||||
- Fresh evaluation for complex targeting rules
|
||||
- Cache invalidation on configuration changes
|
||||
|
||||
##### Schema-Driven Configuration
|
||||
|
||||
Two schemas validate flag configurations:
|
||||
|
||||
- `https://flagd.dev/schema/v0/flags.json`: Overall flag structure
|
||||
- `https://flagd.dev/schema/v0/targeting.json`: Targeting rule validation
|
||||
|
||||
These enable:
|
||||
|
||||
- IDE support with autocomplete
|
||||
- Run-time and build-time validation
|
||||
- Separate validation of rules and overall configuration if desired
|
||||
|
||||
## Considered Options
|
||||
|
||||
- **Custom DSL**: Would require parsers in every language
|
||||
- **JavaScript/Lua evaluation**: Security risks and language lock-in
|
||||
- **CEL**: limited number of implementations at time of decision, can't be directly parsed/validated when embedded in Kubernetes resources
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Good, because implementations exist across languages
|
||||
- Good, because, no code injection or system access possible
|
||||
- Good, because combined with JSON schemas, we have rich IDE support
|
||||
- Good, because JSON is easily serialized and also can be represented/embedded in YAML
|
||||
|
||||
### Negative
|
||||
|
||||
- Bad, JSONLogic syntax can be cumbersome when rules are complex
|
||||
- Bad, hard to debug
|
||||
|
||||
## Conclusion
|
||||
|
||||
flagd's targeting configuration system represents a thoughtful balance between safety, portability, and capability.
|
||||
By building on JSON Logic and extending it with feature-flag-specific operators, flagd achieves remarkable flexibility while maintaining security and performance.
|
|
@ -1,121 +0,0 @@
|
|||
---
|
||||
status: draft
|
||||
author: @toddbaert
|
||||
created: 2025-06-06
|
||||
updated: 2025-06-06
|
||||
---
|
||||
|
||||
# Fractional Operator
|
||||
|
||||
The fractional operator enables deterministic, fractional feature flag distribution.
|
||||
|
||||
## Background
|
||||
|
||||
Nearly all feature flag systems require pseudorandom assignment support to facilitate key use cases, including experimentation and fractional progressive rollouts.
|
||||
Since flagd seeks to implement a full feature flag evaluation engine, such a feature is required.
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Deterministic**: must be consistent given the same input (so users aren't re-assigned with each page view, for example)
|
||||
- **Performant**: must be quick; we want "predictable randomness", but with a relatively low performance cost
|
||||
- **Ease of use**: must be easy to use and understand for basic use-cases
|
||||
- **Customization**: must support customization, such as specifying a particular context attribute to "bucket" on
|
||||
- **Stability**: adding new variants should result in new assignments for as small a section of the audience as possible
|
||||
- **Strong avalanche effect**: slight input changes should result in relatively high chance of differential bucket assignment
|
||||
|
||||
## Considered Options
|
||||
|
||||
- We considered various "more common" hash algos, such as `sha1` and `md5`, but they were frequently slower than `Murmur3`, and didn't offer better performance for our purposes
|
||||
- Initially we required weights to sum to 100, but we've since revoked that requirement
|
||||
|
||||
## Proposal
|
||||
|
||||
### MurmurHash3 + numeric weights + optional targeting-key-based bucketing value
|
||||
|
||||
#### The fractional operator mechanism
|
||||
|
||||
The fractional operator facilitates **deterministic A/B testing and gradual rollouts** through a custom JSONLogic extension introduced in flagd version 0.6.4+.
|
||||
This operator splits feature flag variants into "buckets", based on the `targetingKey` (or another optionally specified key), ensuring users consistently receive the same variant across sessions through sticky evaluation.
|
||||
|
||||
The core algorithm involves four steps: extracting a bucketing property from the evaluation context, hashing this value using MurmurHash3, mapping the hash to a [0, 99] range, and selecting variants based on cumulative weight thresholds.
|
||||
This approach guarantees that identical inputs always produce identical outputs (excepting the case of rules involving the `$flag.timestamp`), which is crucial for maintaining a consistent user experience.
|
||||
|
||||
#### MurmurHash3: The chosen algorithm
|
||||
|
||||
flagd specifically employs **MurmurHash3 (32-bit variant)** for its fractional operator, prioritizing performance and distribution quality over cryptographic security.
|
||||
This non-cryptographic hash function provides excellent performance and good avalanche properties (small input changes produce dramatically different outputs) while maintaining deterministic behavior essential for sticky evaluations.
|
||||
Its wide language implementation ensures identical results across different flagd providers, no matter the language in question.
|
||||
|
||||
#### Bucketing value
|
||||
|
||||
The bucketing value is an optional first value to the operator (it may be a JSONLogic expression, other than an array).
|
||||
This enables targeting based on arbitrary attributes (individual users, companies/tenants, etc).
|
||||
If not specified, the bucketing value is a JSONLogic expression concatenating the `$flagd.flagKey` and the extracted [targeting key](https://openfeature.dev/specification/glossary/#targeting-key) (`targetingKey`) from the context (the inclusion of the flag key prevents users from landing in the same "bucket index" for all flags with the same number of buckets).
|
||||
If the bucketing value does not resolve to a string, or the `targeting key` is undefined, the evaluation is considered erroneous.
|
||||
|
||||
```json
|
||||
// Default bucketing value
|
||||
{
|
||||
"cat": [
|
||||
{"var": "$flagd.flagKey"},
|
||||
{"var": "targetingKey"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Bucketing strategy implementation
|
||||
|
||||
After retrieving the bucketing value, and hashing it to a [0, 99] range, the algorithm iterates through variants, accumulating their relative weights until finding the bucket containing the hash value.
|
||||
|
||||
```go
|
||||
// Simplified implementation structure
|
||||
hashValue := murmur3Hash(bucketingValue) % 100
|
||||
currentWeight := 0
|
||||
for _, distribution := range variants {
|
||||
currentWeight += (distribution.weight * 100) / sumOfWeights
|
||||
if hashValue < currentWeight {
|
||||
return distribution.variant
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This approach supports flexible weight ratios; weights of [25, 50, 25] translate to 25%, 50%, and 25% distribution respectively as do [1, 2, 1].
|
||||
It's worth noting that the maximum bucket resolution is 1/100, meaning that the maximum ratio between variant distributions is 1:99 (ie: a weight distribution of [1, 100000] behaves the same as [1, 100]).
|
||||
|
||||
#### Format flexibility: Shorthand vs longhand
|
||||
|
||||
flagd provides two syntactic options for defining fractional distributions, balancing simplicity with precision. **Shorthand format** enables equal distribution by specifying variants as single-element arrays (in this case, an equal weight of 1 is automatically assumed):
|
||||
|
||||
```json
|
||||
{
|
||||
"fractional": [
|
||||
["red"],
|
||||
["blue"],
|
||||
["green"]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Longhand format** allows precise weight control through two-element arrays:
|
||||
|
||||
Note that in this example, we've also specified a custom bucketing value.
|
||||
|
||||
```json
|
||||
{
|
||||
"fractional": [
|
||||
{ "var": "email" },
|
||||
["red", 50],
|
||||
["blue", 20],
|
||||
["green", 30]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Consequences
|
||||
|
||||
- Good, because Murmur3 is fast, has good avalanche properties, and we don't need "cryptographic" randomness
|
||||
- Good, because we have flexibility but also simple shorthand
|
||||
- Good, because our bucketing algorithm is relatively stable when new variants are added
|
||||
- Bad, because we only support string bucketing values
|
||||
- Bad, because we don't have bucket resolution finer than 1:99
|
||||
- Bad, because we don't support JSONLogic expressions within bucket definitions
|
|
@ -1,157 +0,0 @@
|
|||
---
|
||||
status: rejected
|
||||
author: @alexandraoberaigner
|
||||
created: 2025-05-28
|
||||
updated: -
|
||||
---
|
||||
|
||||
# Add support for dynamic usage of Flag Sets to `flagd`
|
||||
|
||||
⚠️ REJECTED IN FAVOR OF <https://github.com/open-feature/flagd/blob/main/docs/architecture-decisions/duplicate-flag-keys.md> ⚠️
|
||||
|
||||
The goal of this decision document is to establish flag sets as a first class concept in `flagd`, and support the dynamic addition/update/removal of flag sets at runtime.
|
||||
|
||||
## Background
|
||||
|
||||
`flagd` is a language-agnostic feature flagging engine that forms a core part of the OpenFeature ecosystem.
|
||||
Flag configurations can be stored in different locations so called `sources`. These are specified at startup, e.g.:
|
||||
|
||||
````shell
|
||||
flagd start \
|
||||
--port 8013 \
|
||||
--uri file:etc/flagd/my-flags-1.json \
|
||||
--uri https://my-flags-2.com/flags
|
||||
````
|
||||
|
||||
The primary objective here is to remove the coupling between sources and "logical" groups of flags, so that providers aren't required to know that their set of flags is sourced from a file/http resource, etc., but could instead just supply a logical identifier for their flag set.
|
||||
|
||||
## Requirements
|
||||
|
||||
* Should enable the dynamic usage of flag sets as logical identifiers.
|
||||
* Should support configurations without flag sets.
|
||||
* Should adhere to existing OpenFeature and flagd terminology and concepts
|
||||
|
||||
## Considered Options
|
||||
|
||||
1. Addition of flag set support in the [flags schema](https://flagd.dev/reference/schema/#targeting) and associated enhancements to `flagd` storage layer
|
||||
2. Support for dynamically adding/removing flag sources through some kind of runtime configuration API
|
||||
3. Support for dynamically adding/removing flag sources through some kind of "discovery" protocol or endpoint (ie: point flagd at a resource that would enumerate a mutable collection of secondary resources which represent flag sets)
|
||||
|
||||
## Proposal
|
||||
|
||||
To support the dynamic usage of flag sets we propose to adapt the flag schema & storage layer in `flagd`.
|
||||
The changes will decouple flag sets from flag sources by supporting multiple flag sets within single flag sources.
|
||||
Dynamic updates to flag sources is already a feature of `flagd`.
|
||||
|
||||
### New Schema Structure
|
||||
|
||||
The proposed changes to the current flagd schema would allow the following json structure for **sources**:
|
||||
|
||||
````json
|
||||
{
|
||||
"$schema": "https://flagd.dev/schema/v1/flagsets.json",
|
||||
"flagSets": {
|
||||
"my-project-1": {
|
||||
"metadata": {
|
||||
...
|
||||
},
|
||||
"flags": {
|
||||
"my-flag-1": {
|
||||
"metadata": {
|
||||
...
|
||||
},
|
||||
...
|
||||
},
|
||||
...
|
||||
},
|
||||
"$evaluators": {
|
||||
...
|
||||
}
|
||||
},
|
||||
"my-project-2": {
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
````
|
||||
|
||||
We propose to introduce a 3rd json schema `flagSets.json`, which references to `flags.json`:
|
||||
|
||||
1. flagSets.json (new)
|
||||
2. flags.json
|
||||
3. targeting.json
|
||||
|
||||
We don't want to support merging of flag sets, due to implementation efforts & potential confusing behaviour of the
|
||||
merge strategy.
|
||||
Therefore, we propose for the initial implementation, `flagSetId`s must be unique across different sources or the configuration is considered invalid.
|
||||
In the future, it might be useful to support and implement multiple "strategies" for merging flagSets from different sources, but that's beyond the scope of this proposal.
|
||||
|
||||
### New Data Structure
|
||||
|
||||
The storage layer in `flagd` requires refactoring to better support multiple flag sets within one source.
|
||||
|
||||
````go
|
||||
package store
|
||||
|
||||
type State struct {
|
||||
FlagSets map[string]FlagSet `json:"flagSets"` // key = flagSetId
|
||||
}
|
||||
|
||||
type FlagSet struct {
|
||||
Flags map[string]model.Flag `json:"flags"` // key = flagKey
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type Flag struct {
|
||||
State string `json:"state"`
|
||||
DefaultVariant string `json:"defaultVariant"`
|
||||
Variants map[string]any `json:"variants"`
|
||||
Targeting json.RawMessage `json:"targeting,omitempty"`
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type Metadata = map[string]interface{}
|
||||
````
|
||||
|
||||
### OpenFeature Provider Implications
|
||||
|
||||
Currently, creating a new flagd provider can look like follows:
|
||||
|
||||
````java
|
||||
final FlagdProvider flagdProvider =
|
||||
new FlagdProvider(FlagdOptions.builder()
|
||||
.resolverType(Config.Evaluator.IN_PROCESS)
|
||||
.host("localhost")
|
||||
.port(8015)
|
||||
.selector("myFlags.json")
|
||||
.build());
|
||||
````
|
||||
|
||||
* With the proposed solution the `flagSetId` should be passed to the builder as selector argument instead of the source.
|
||||
* `null` is now a valid selector value, referencing flags which do not belong to a flag set. The default/fallback `flagSetId` should be `null`.
|
||||
|
||||
### Consequences
|
||||
|
||||
* Good, because it decouples flag sets from the sources
|
||||
* Good, because we will refactor the flagd storage layer (which is currently storing duplicate data & difficult to
|
||||
understand)
|
||||
* Good, because we can support backwards compatibility with the v0 schema
|
||||
* Good, because the "null" flag set is logically treated as any other flag set, reducing overall implementation complexity.
|
||||
* Bad, because there's additional complexity to support this new config schema as well as the current.
|
||||
* Bad, because this is a breaking change in the behavior of the `selector` member.
|
||||
|
||||
### Other Options
|
||||
|
||||
We evaluated the [mentioned options](#considered-options) as follows: _options 2 + 3: support for dynamically adding/removing flag sources_ and decided against this option because it requires much more implementation effort than _option 1_. Required changes include:
|
||||
|
||||
* flagd/core/sync: dynamic mode, which allows specifying the sync type that should be added/removed at runtime
|
||||
* flagd/flagd: startup dynamic sync configuration
|
||||
* make sure to still support static syncs
|
||||
|
||||
## More Information
|
||||
|
||||
* Current flagd schema: [flags.json](https://flagd.dev/schema/v0/flags.json)
|
||||
* flagd storage layer
|
||||
implementation: [store/flags.go](https://github.com/open-feature/flagd/blob/main/core/pkg/store/flags.go)
|
||||
* [flagd GitHub Repository](https://github.com/open-feature/flagd)
|
||||
* [OpenFeature Project Overview](https://openfeature.dev/)
|
|
@ -1,77 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: Dave Josephsen
|
||||
created: 2025-05-21
|
||||
updated: 2025-05-21
|
||||
---
|
||||
|
||||
# ADR: Multiple Sync Sources
|
||||
|
||||
It is the intent of this document to articulate our rationale for supporting multiple flag synchronization sources (grpc, http, blob, local file, etc.) as a core design property of flagd. This document also includes a short discussion of how flagd is engineered to enable the community to extend it to support new sources in the future; "future proofing" the runtime against sources that don't yet exist, or those we may have omitted, is a requisite byproduct of this architectural decision.
|
||||
|
||||
The goal of first-class multi-sync support generally is to broaden flagd's potential to suit the needs of many different types of users or architecture. By decoupling flag persistence from the runtime, flagd can focus on evaluation and sync, while enabling its user-base to choose a persistence layer that best suits their individual requirements.
|
||||
|
||||
## Background
|
||||
|
||||
The flagd daemon is a feature flag evaluation engine that forms a core part of the OpenFeature ecosystem as a production-grade reference implementation. Unlike OpenFeature SDK components, which are, by design, agnostic to the specifics of flag structure, evaluation, and persistence, flagd must take an opinionated stance about how feature-flags look, feel, and act.
|
||||
What schema best describes a flag? How should they be evaluated? And in what sort of persistence layer should they be stored?
|
||||
|
||||
This latter-most question -- _how should they be stored_ -- is the most opaque design detail of every commercial flagging product from the perspective of its end-users.
|
||||
As a front-line engineer using a commercial flagging product, I may, for example, be exposed to flag schema by the product's SDK, and become familiar with its evaluation intricacies over time as my needs grow to require advanced features, or as I encounter surprising behavior. Rarely, however, is an end-user exposed to the details of a commercial product's storage backend.
|
||||
The SaaS vendor is expected to engineer a safe, fast, multi-tenant storage back-end, optimized for its flag schema and operational parameters, and insulate the customer from these details via its SDK.
|
||||
This presents Flagd, an open-source evaluation engine, with an interesting conundrum: what sort of flag storage best suits the needs of its potential user-base (which is everyone)?
|
||||
|
||||
## Requirements
|
||||
|
||||
* Support the storage technology that's most likely to meet the needs of current Flagd user-base (Don't be weird. Don't be surprising.)
|
||||
* Make it "easy" to extend the flagd runtime to support "new" storage systems
|
||||
* Horizontally scalable persistence layer
|
||||
* Minimize end-user exposure to persistence "quirks" (replication schemes, leader election, back-end scaling, consistency minutia, etc.. )
|
||||
* Reliable, Fast, Transparent
|
||||
* Full CRUD, read-optimized.
|
||||
|
||||
## Considered Options
|
||||
|
||||
* Be super-opinionated and prescribe a built-in raftesque key-value setup, analogous to the designs of k8s and kafka, which prescribe etcd and zookeeper respectively.
|
||||
* Roll a single "standard interface" for flag sync (published grpc spec or similar) (??)
|
||||
* Decouple storage from flagd entirely, by exposing a Golang interface type that "providers" can implement to provide support for any data store.
|
||||
|
||||
## Proposal
|
||||
<!--
|
||||
Unsure whether we want a diagram in this section or not. Happy to add one if we want one.
|
||||
-->
|
||||
The solution to the conundrum posited in the background section of this document is to decouple flag storage entirely from the rest of the runtime, including instead support for myriad commonly used data synchronization interfaces.
|
||||
This allows Flagd to be agnostic to flag storage, while enabling users to use whichever storage back-end best suits their environment.
|
||||
|
||||
To extend Flagd to support a new storage back-end, _sync providers_ implement the _ISync_ interface, detailed below:
|
||||
|
||||
```go
|
||||
type ISync interface {
|
||||
// Init is used by the sync provider to initialize its data structures and external dependencies.
|
||||
Init(ctx context.Context) error
|
||||
|
||||
// Sync is the contract between Runtime and sync implementation.
|
||||
// Note that, it is expected to return the first data sync as soon as possible to fill the store.
|
||||
Sync(ctx context.Context, dataSync chan<- DataSync) error
|
||||
|
||||
// ReSync is used to fetch the full flag configuration from the sync
|
||||
// This method should trigger an ALL sync operation then exit
|
||||
ReSync(ctx context.Context, dataSync chan<- DataSync) error
|
||||
|
||||
// IsReady shall return true if the provider is ready to communicate with the Runtime
|
||||
IsReady() bool
|
||||
}
|
||||
```
|
||||
|
||||
Synchronization events "fan in" from all configured sync providers to flagd's in-memory state-store via a channel carrying [`sync.DataSync`](https://github.com/open-feature/flagd/blob/main/core/pkg/store/flags.go#L19) events.
|
||||
These events detail the source and type of the change, along with the flag data in question and are merged into the currently held state by the [store](https://github.com/open-feature/flagd/blob/main/core/pkg/store/flags.go#L19).
|
||||
|
||||
### Consequences
|
||||
|
||||
Because synchronization providers may vary wildly with respect to their implementation details, supporting multiple sync providers means supporting custom configuration parameters for each provider.
|
||||
As a consequence, Flagd's configuration is itself made more complex, and its bootstrap process, whose goal is to create a [`runtime.Runtime`](https://github.com/open-feature/flagd/blob/main/flagd/pkg/runtime/runtime.go#L21) object from user-provided configuration, spends the preponderance of its time and effort interpreting, configuring, and initializing sync providers.
|
||||
There is, in fact, a custom bootstrap type, called the `syncbuilder` whose job is to bootstrap sync providers and arrange them into a map, for the runtime to use.
|
||||
|
||||
Further, because sync providers may vary wildly with respect to implementation, the end-user's choice of sync sources can change Flagd's operational parameters. For example, end-users who choose the GRPC provider can expect flag-sync operations to be nearly immediate, because GRPC updates can be pushed to flagd as they occur, compared with end-users who chose the HTTP provider, who must wait for a timer to expire in order to notice updates, because HTTP is a polling-based implementation.
|
||||
|
||||
Finally, sync Providers also contribute a great deal of girth to flagd's documentation, because again, their setup, syntax, and runtime idiosyncrasies may differ wildly.
|
|
@ -1,211 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @beeme1mr
|
||||
created: 2025-06-06
|
||||
updated: 2025-06-20
|
||||
---
|
||||
|
||||
# Support Explicit Code Default Values in flagd Configuration
|
||||
|
||||
This ADR proposes adding support for explicitly configuring flagd to use code-defined default values by allowing `null` as a valid default variant. This change addresses the current limitation where users cannot differentiate between "use the code's default" and "use this configured default" without resorting to workarounds like misconfigured rulesets.
|
||||
|
||||
## Background
|
||||
|
||||
Currently, flagd requires a default variant to be specified in flag configurations. This creates a fundamental mismatch with the OpenFeature specification and common feature flag usage patterns where code-defined defaults serve as the ultimate fallback.
|
||||
|
||||
The current behavior leads to confusion and operational challenges:
|
||||
|
||||
1. **Two Sources of Truth**: Applications have default values defined in code (as per OpenFeature best practices), while flagd configurations require their own default variants. This dual-default pattern violates the principle of single source of truth.
|
||||
|
||||
2. **State Transition Issues**: When transitioning a flag from DISABLED to ENABLED state, the behavior changes unexpectedly:
|
||||
- DISABLED state: Flag evaluation falls through to code defaults
|
||||
- ENABLED state: Flag evaluation uses the configured default variant
|
||||
|
||||
3. **Workarounds**: Users resort to misconfiguring rulesets (e.g., returning invalid variants) to force fallback to code defaults, which generates confusing error states and complicates debugging.
|
||||
|
||||
4. **OpenFeature Alignment**: The OpenFeature specification emphasizes that code defaults should be the ultimate fallback, but flagd's current design doesn't provide a clean way to express this intent.
|
||||
|
||||
Related discussions and context can be found in the [OpenFeature specification](https://openfeature.dev/specification/types) and [flagd flag definitions reference](https://flagd.dev/reference/flag-definitions/).
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Explicit Code Default Support**: Users must be able to explicitly configure a flag to use the code-defined default value as its resolution
|
||||
- **Backward Compatibility**: Existing flag configurations must continue to work without modification
|
||||
- **Clear Semantics**: The configuration must clearly indicate when code defaults are being used versus configured defaults
|
||||
- **Appropriate Reason Codes**: Resolution details must include appropriate reason codes when code defaults are used (e.g., `DEFAULT` or a new specific reason)
|
||||
- **Schema Validation**: JSON schema must support and validate the new configuration options
|
||||
- **Provider Compatibility**: All OpenFeature providers must handle the new behavior correctly
|
||||
- **Testbed Coverage**: flagd testbed must include test cases for the new functionality
|
||||
|
||||
## Considered Options
|
||||
|
||||
- **Option 1: Allow `null` as Default Variant** - Modify the schema to accept `null` as a valid value for defaultVariant, signaling "use code default"
|
||||
- **Option 2: Make Default Variant Optional** - Remove the requirement for defaultVariant entirely, with absence meaning "use code default"
|
||||
- **Option 3: Special Variant Value** - Define a reserved variant name (e.g., `"__CODE_DEFAULT__"`) that signals code default usage
|
||||
- **Option 4: New Configuration Property** - Add a new property like `useCodeDefault: true` alongside or instead of defaultVariant
|
||||
- **Option 5: Status Quo with Documentation** - Keep current behavior but improve documentation about workarounds
|
||||
|
||||
## Proposal
|
||||
|
||||
We propose implementing **Option 1: Allow `null` as Default Variant**, potentially combined with **Option 2: Make Default Variant Optional** for maximum flexibility.
|
||||
|
||||
The implementation leverages field presence in evaluation responses across all protocols (in-process, RPC, and OFREP). When a flag configuration has `defaultVariant: null`, the evaluation response omits the value field entirely, which serves as a programmatic signal to the client to use its code-defined default value.
|
||||
|
||||
This approach offers several key advantages:
|
||||
|
||||
1. **No Protocol Changes**: RPC and OFREP protocols remain unchanged
|
||||
2. **Clear Semantics**: Omitted value field = "use your code default"
|
||||
3. **Backward Compatible**: Existing clients and servers continue to work
|
||||
4. **Universal Pattern**: Works consistently across all evaluation modes
|
||||
|
||||
The absence of a value field provides an unambiguous signal that distinguishes between "the server evaluated to null/false/empty" (value field present) and "the server delegates to your code default" (value field absent).
|
||||
|
||||
### Implementation Details
|
||||
|
||||
1. **Schema Changes**:
|
||||
|
||||
```json
|
||||
{
|
||||
"defaultVariant": {
|
||||
"oneOf": [
|
||||
{ "type": "string" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"description": "Default variant to use. Set to null to use code-defined default."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. **Evaluation Behavior**:
|
||||
- When flag has `defaultVariant: null` and targeting returns no match
|
||||
- Server responds with the reason set to "ERROR" and the error code "FLAG_NOT_FOUND"
|
||||
- Client detects this error response and uses its code-defined default
|
||||
- This same pattern works across all evaluation modes
|
||||
|
||||
3. **Provider Implementation**:
|
||||
- No changes to existing providers
|
||||
|
||||
### Design Rationale
|
||||
|
||||
**Using "ERROR" reason**: We intentionally reuse the existing "ERROR" reason code rather than introducing a new one (like "CODE_DEFAULT"). This retains the current behavior of a disabled flag and allows for progressive enablement of a flag without unexpected variations in flag evaluation behavior.
|
||||
|
||||
Advantages of this approach:
|
||||
|
||||
- The "ERROR" reason is already used for cases where the flag is not found or misconfigured, so it aligns with the intent of using code defaults.
|
||||
- This approach avoids introducing new reason codes that would require additional handling in providers and clients.
|
||||
|
||||
### API changes
|
||||
|
||||
**Flag Configuration**:
|
||||
|
||||
```yaml
|
||||
flags:
|
||||
my-feature:
|
||||
state: ENABLED
|
||||
defaultVariant: null # Explicitly use code default
|
||||
variants:
|
||||
on: true
|
||||
off: false
|
||||
targeting:
|
||||
if:
|
||||
- "===":
|
||||
- var: user-type
|
||||
- "beta"
|
||||
- on
|
||||
```
|
||||
|
||||
**OFREP Response** when code default is indicated:
|
||||
|
||||
#### Single flag evaluation response
|
||||
|
||||
A single flag evaluation returns a `404` status code.
|
||||
|
||||
```json
|
||||
{
|
||||
"key": "my-feature",
|
||||
"errorCode": "FLAG_NOT_FOUND",
|
||||
// Optional error details
|
||||
"errorDetails": "Targeting not matched, using code default",
|
||||
"metadata": {}
|
||||
}
|
||||
```
|
||||
|
||||
#### Bulk flag evaluation response
|
||||
|
||||
```json
|
||||
{
|
||||
"flags": [
|
||||
// Flag is omitted from bulk response
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**flagd RPC Response** (ResolveBooleanResponse):
|
||||
|
||||
```protobuf
|
||||
{
|
||||
"reason": "ERROR",
|
||||
"errorCode": "FLAG_NOT_FOUND",
|
||||
"metadata": {}
|
||||
}
|
||||
```
|
||||
|
||||
### Consequences
|
||||
|
||||
- Good, because it eliminates the confusion between code and configuration defaults
|
||||
- Good, because it provides explicit control over default behavior without workarounds
|
||||
- Good, because it aligns flagd more closely with OpenFeature specification principles
|
||||
- Good, because it supports gradual flag rollout patterns more naturally
|
||||
- Good, because it provides the ability to delegate to whatever is defined in code
|
||||
- Good, because it requires no changes to existing RPC or protocol signatures
|
||||
- Good, because it uses established patterns (field presence) for clear semantics
|
||||
- Good, because it maintains full backward compatibility
|
||||
- Bad, because it requires updates across multiple components (flagd, providers, testbed)
|
||||
- Bad, because it introduces a new concept that users need to understand
|
||||
- Neutral, because existing configurations continue to work unchanged
|
||||
|
||||
### Implementation Plan
|
||||
|
||||
1. Update flagd-schemas with new JSON schema supporting null default variants
|
||||
2. Update flagd-testbed with comprehensive test cases for all evaluation modes
|
||||
3. Implement core logic in flagd to handle null defaults and omit value/variant fields
|
||||
4. Update OpenFeature providers with the latest schema and test harness to ensure they handle the new behavior correctly
|
||||
5. Documentation updates, migration guides, and playground examples to demonstrate the new configuration options
|
||||
|
||||
### Testing Considerations
|
||||
|
||||
To ensure correct implementation across all components:
|
||||
|
||||
1. **Provider Tests**: Each component (flagd, providers) must have unit tests verifying the handling of `null` as a default variant
|
||||
2. **Integration Tests**: End-to-end tests across different language combinations (e.g., Go flagd with Java provider)
|
||||
3. **OFREP Tests**: Verify JSON responses correctly omit flags with a `null` default variant
|
||||
4. **Backward Compatibility Tests**: Ensure old providers handle new responses gracefully
|
||||
5. **Consistency Tests**: Verify identical behavior across in-process, RPC, and OFREP modes
|
||||
|
||||
### Open questions
|
||||
|
||||
- How should providers handle responses with missing value fields in strongly-typed languages?
|
||||
- We'll handle the same way as with optional fields, using language-specific patterns (e.g., pointers in Go, `hasValue()` in Java).
|
||||
- Should we support both `null` and absent `defaultVariant` fields, or choose one approach?
|
||||
- Yes, we'll support both `null` and absent fields to maximize flexibility. An absent `defaultVariant` will be the equivalent of `null`.
|
||||
- What migration path should we recommend for users currently using workarounds?
|
||||
- Update the flag configurations to use `defaultVariant: null` and remove any misconfigured rulesets that force code defaults.
|
||||
- Should this feature be gated behind a configuration flag during initial rollout?
|
||||
- We'll avoid public facing documentation until the feature is fully implemented and tested.
|
||||
- How do we ensure consistent behavior across all provider implementations?
|
||||
- Gherkin tests will be added to the flagd testbed to ensure all providers handle the new behavior consistently.
|
||||
- Should providers validate that the reason is "DEFAULT" when value is omitted, or accept any omitted value as delegation?
|
||||
- Providers should accept any omitted value as delegation.
|
||||
- How do we handle edge cases where network protocols might strip empty fields?
|
||||
- It would behave as expected, as the absence of fields is the intended signal.
|
||||
- When the client uses its code default after receiving a delegation response, what variant should be reported in telemetry/analytics?
|
||||
- The variant will be omitted, indicating that the code default was used.
|
||||
- Should we add explicit proto comments documenting the field omission behavior?
|
||||
- Leave this to the implementers, but it would be beneficial to add comments in the proto files to clarify this behavior for future maintainers.
|
||||
|
||||
## More Information
|
||||
|
||||
- [OpenFeature Specification - Flag Evaluation](https://openfeature.dev/specification/types#flag-evaluation)
|
||||
- [flagd Flag Definitions Reference](https://flagd.dev/reference/flag-definitions/)
|
||||
- [flagd JSON Schema Repository](https://github.com/open-feature/flagd-schemas)
|
||||
- [flagd Testbed](https://github.com/open-feature/flagd-testbed)
|
|
@ -33,7 +33,7 @@ erDiagram
|
|||
|
||||
### In-Process evaluation
|
||||
|
||||
In-process deployments embed the flagd evaluation engine directly into the client application through the use of an [in-process provider](./providers/index.md).
|
||||
In-process deployments embed the flagd evaluation engine directly into the client application through the use of an [in-process provider](./installation.md#in-process).
|
||||
The in-process provider is connected via the sync protocol to an implementing [gRPC service](./concepts/syncs.md#grpc-sync) that provides the flag definitions.
|
||||
You can use flagd as a [gRPC sync service](./reference/grpc-sync-service.md).
|
||||
In this mode, the flag sync stream will expose aggregated flag configurations currently configured through [syncs](./concepts/syncs.md).
|
||||
|
|
|
@ -22,4 +22,3 @@ Below is a non-exhaustive table of common feature flag use-cases, and how flagd
|
|||
| dynamic (context-sensitive) evaluation | flagd evaluations are context sensitive. Rules can use arbitrary context attributes as inputs for flag evaluation logic. |
|
||||
| fractional evaluation / random assignment | flagd's [fractional](../reference/custom-operations/fractional-operation.md) custom operation supports pseudorandom assignment of flag values. |
|
||||
| progressive roll-outs | Progressive roll-outs of new features can be accomplished by leveraging the [fractional](../reference/custom-operations/fractional-operation.md) custom operation as well as automation in your build pipeline, SCM, or infrastructure which updates the distribution over time. |
|
||||
| feature flag telemetry | flagd supports the OpenTelemetry conventions for feature flags, by returning compliant [resolution details](https://openfeature.dev/specification/types#resolution-details) and [metadata](../reference/monitoring.md#metadata), in addition to flag values. |
|
||||
|
|
|
@ -26,19 +26,14 @@ See [sync source](../reference/sync-configuration.md#source-configuration) confi
|
|||
The HTTP sync provider fetches flags from a remote source and periodically polls the source for flag definition updates.
|
||||
|
||||
```shell
|
||||
flagd start --uri https://my-flag-source/flags.json
|
||||
flagd start --uri https://my-flag-source.json
|
||||
```
|
||||
|
||||
In this example, `https://my-flag-source/flags.json` is a remote endpoint responding valid feature flag definition when
|
||||
In this example, `https://my-flag-source.json` is a remote endpoint responding valid feature flag definition when
|
||||
invoked with **HTTP GET** request.
|
||||
The polling interval, port, TLS settings, and authentication information can be configured.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
To optimize network usage, it honors the HTTP ETag protocol: if the server includes an `ETag` header in its response,
|
||||
flagd will store this value and send it in the `If-None-Match` header on subsequent requests. If the flag data has
|
||||
not changed, the server responds with 304 Not Modified, and flagd will skip updating its state. If the data has
|
||||
changed, the server returns the new content and a new ETag, prompting flagd to update its flags.
|
||||
|
||||
---
|
||||
|
||||
### gRPC sync
|
||||
|
@ -73,53 +68,6 @@ In this example, `default/my_example` expected to be a valid FeatureFlag resourc
|
|||
namespace and `my_example` being the resource name.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
---
|
||||
|
||||
### GCS sync
|
||||
|
||||
The GCS sync provider fetches flags from a GCS blob and periodically polls the GCS for the flag definition updates.
|
||||
It uses [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) if they
|
||||
are [configured](https://cloud.google.com/docs/authentication/provide-credentials-adc) to authorize the calls to GCS.
|
||||
|
||||
```shell
|
||||
flagd start --uri gs://my-bucket/my-flags.json
|
||||
```
|
||||
|
||||
In this example, `gs://my-bucket/my-flags.json` is expected to be a valid GCS URI accessible by the flagd
|
||||
(either by being public or together with application default credentials).
|
||||
The polling interval can be configured.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
### Azure Blob sync
|
||||
|
||||
The Azure Blob sync provider fetches flags from an Azure Blob Storage blob and periodically polls the blob for the flag definition updates.
|
||||
It uses [environment variables](https://pkg.go.dev/gocloud.dev/blob/azureblob#hdr-URLs) to set the Storage Account name and to
|
||||
authorize the calls to Azure Blob Storage.
|
||||
|
||||
```shell
|
||||
flagd start --uri azblob://my-container/my-flags.json
|
||||
```
|
||||
|
||||
In this example, assuming the environment variable AZURE_STORAGE_ACCOUNT is set to `myaccount`, and other options are not set, the service URL will be:
|
||||
`https://myaccount.blob.core.windows.net/my-container/my-flags.json`.
|
||||
This is expected to be a valid service URL accessible by flagd (either by being public or together with environment variable credentials).
|
||||
The polling interval can be configured.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
### S3 sync
|
||||
|
||||
The S3 sync provider fetches flags from an S3 bucket and periodically polls for flag definition updates.
|
||||
It uses [AWS standardized credentials chain](https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html) to authorize the calls to AWS.
|
||||
|
||||
```shell
|
||||
flagd start --uri s3://my-bucket/my-flags.json
|
||||
```
|
||||
|
||||
In this example, `s3://my-bucket/my-flags.json` is expected to be a valid URI accessible by flagd
|
||||
(either by being public or together with the appropriate credentials read from a file or via the environment as described in the AWS docs linked above).
|
||||
The polling interval is configurable.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) for details.
|
||||
|
||||
## Merging
|
||||
|
||||
Flagd can be configured to read from multiple sources at once, when this is the case flagd will merge all flag definition into a single
|
||||
|
|
21
docs/faq.md
21
docs/faq.md
|
@ -33,27 +33,6 @@ Please see [architecture](./architecture.md) and [installation](./installation.m
|
|||
|
||||
---
|
||||
|
||||
> How can I access the SBOM for flagd?
|
||||
|
||||
SBOMs for the flagd binary are available as assets on the [GitHub release page](https://github.com/open-feature/flagd/releases).
|
||||
Container SBOMs can be inspected using the Docker CLI.
|
||||
|
||||
An example of inspecting the SBOM for the latest flagd `linux/amd64` container image:
|
||||
|
||||
```shell
|
||||
docker buildx imagetools inspect ghcr.io/open-feature/flagd:latest \
|
||||
--format '{{ json (index .SBOM "linux/amd64").SPDX }}'
|
||||
```
|
||||
|
||||
An example of inspecting the SBOM for the latest flagd `linux/arm64` container image:
|
||||
|
||||
```shell
|
||||
docker buildx imagetools inspect ghcr.io/open-feature/flagd:latest \
|
||||
--format '{{ json (index .SBOM "linux/arm64").SPDX }}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
> Why doesn't flagd support {_my desired feature_}?
|
||||
|
||||
Because you haven't opened a PR or created an issue!
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -13,15 +13,14 @@ Providers for flagd come in two flavors: those that are built to communicate wit
|
|||
|
||||
The following table lists all the available flagd providers.
|
||||
|
||||
| Technology | RPC | in-process |
|
||||
| --------------------------------------------------- | ---------------- | ---------------- |
|
||||
| Technology | RPC | in-process |
|
||||
| ------------------------------------------------------------- | ---------------- | ---------------- |
|
||||
| :fontawesome-brands-golang: [Go](./go.md) | :material-check: | :material-check: |
|
||||
| :fontawesome-brands-java: [Java](./java.md) | :material-check: | :material-check: |
|
||||
| :fontawesome-brands-node-js: [Node.JS](./nodejs.md) | :material-check: | :material-check: |
|
||||
| :simple-php: [PHP](./php.md) | :material-check: | :material-close: |
|
||||
| :simple-dotnet: [.NET](./dotnet.md) | :material-check: | :material-check: |
|
||||
| :simple-python: [Python](./python.md) | :material-check: | :material-check: |
|
||||
| :fontawesome-brands-rust: [Rust](./rust.md) | :material-check: | :material-check: |
|
||||
| :simple-python: [Python](./python.md) | :material-check: | :material-close: |
|
||||
| :material-web: [Web](./web.md) | :material-check: | :material-close: |
|
||||
|
||||
For information on implementing a flagd provider, see the [specification](../reference/specifications/providers.md).
|
||||
For information on implementing a flagd provider, see the specifications for [RPC](../reference/specifications/rpc-providers.md) and [in-process](../reference/specifications/in-process-providers.md) providers.
|
|
@ -5,5 +5,4 @@
|
|||
{%
|
||||
include "https://raw.githubusercontent.com/open-feature/python-sdk-contrib/main/providers/openfeature-provider-flagd/README.md"
|
||||
start="## Installation"
|
||||
end="## License"
|
||||
%}
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
# Rust provider
|
||||
|
||||
## Installation
|
||||
|
||||
{%
|
||||
include "https://raw.githubusercontent.com/open-feature/rust-sdk-contrib/refs/heads/main/crates/flagd/README.md"
|
||||
start="### Installation"
|
||||
end="### License"
|
||||
%}
|
|
@ -25,58 +25,23 @@ These types of feature flags are commonly used to gate access to a new feature u
|
|||
The second flag has the key `background-color` and is a multi-variant string.
|
||||
These are commonly used for A/B/(n) testing and experimentation.
|
||||
|
||||
### Running Flagd
|
||||
### Start flagd
|
||||
|
||||
=== "Docker"
|
||||
Run the following command to start flagd using docker. This will expose flagd on port `8013` and read from the `demo.flagd.json` file we downloaded in the previous step.
|
||||
|
||||
Run the following command to start flagd using docker. This will expose flagd on port `8013` and read from the `demo.flagd.json` file we downloaded in the previous step.
|
||||
```shell
|
||||
docker run \
|
||||
--rm -it \
|
||||
--name flagd \
|
||||
-p 8013:8013 \
|
||||
-v $(pwd):/etc/flagd \
|
||||
ghcr.io/open-feature/flagd:latest start \
|
||||
--uri file:./etc/flagd/demo.flagd.json
|
||||
```
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
--rm -it \
|
||||
--name flagd \
|
||||
-p 8013:8013 \
|
||||
-v $(pwd):/etc/flagd \
|
||||
ghcr.io/open-feature/flagd:latest start \
|
||||
--uri file:./etc/flagd/demo.flagd.json
|
||||
```
|
||||
|
||||
??? "Tips for Windows users"
|
||||
In Windows, use WSL system for both the file location and Docker runtime.
|
||||
Mixed file systems does not work and this is a [limitation of Docker](https://github.com/docker/for-win/issues/8479).
|
||||
|
||||
=== "Docker Compose"
|
||||
|
||||
Create a docker-compose.yaml file with the following contents:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
flagd:
|
||||
image: ghcr.io/open-feature/flagd:latest
|
||||
volumes:
|
||||
- ./flags:/flags
|
||||
command: [
|
||||
'start',
|
||||
'--uri',
|
||||
'file:./flags/demo.flagd.json',
|
||||
]
|
||||
ports:
|
||||
- '8013:8013'
|
||||
```
|
||||
|
||||
Create a folder called `flags` where the JSON flag files can reside. [Download the flag definition](#download-the-flag-definition) and move this JSON file to the flags folder.
|
||||
|
||||
```text
|
||||
├── flags
|
||||
│ ├── demo.flagd.json
|
||||
├── docker-compose.yaml
|
||||
```
|
||||
|
||||
Open up a terminal and run the following:
|
||||
|
||||
```shell
|
||||
docker compose up
|
||||
```
|
||||
??? "Tips for Windows users"
|
||||
In Windows, use WSL system for both the file location and Docker runtime.
|
||||
Mixed file systems does not work and this is a [limitation of Docker](https://github.com/docker/for-win/issues/8479).
|
||||
|
||||
### Evaluating a feature flag
|
||||
|
||||
|
|
|
@ -64,9 +64,7 @@ The `defaultVariant` is `red`, but it contains a [targeting rule](../flag-defini
|
|||
In this case, `25%` of the evaluations will receive `red`, `25%` will receive `blue`, and so on.
|
||||
|
||||
Assignment is deterministic (sticky) based on the expression supplied as the first parameter (`{ "cat": [{ "var": "$flagd.flagKey" }, { "var": "email" }]}`, in this case).
|
||||
The value retrieved by this expression is referred to as the "bucketing value" and must be a string.
|
||||
Other primitive types can be used by casting the value using `"cat"` operator.
|
||||
For example, a less deterministic distribution can be achieved using `{ "cat": [{ "var": "$flagd.timestamp" }]}`.
|
||||
The value retrieved by this expression is referred to as the "bucketing value".
|
||||
The bucketing value expression can be omitted, in which case a concatenation of the `targetingKey` and the `flagKey` will be used.
|
||||
|
||||
The `fractional` operation is a custom JsonLogic operation which deterministically selects a variant based on
|
||||
|
|
|
@ -56,15 +56,8 @@ A fully configured flag may look like this.
|
|||
"on",
|
||||
"off"
|
||||
]
|
||||
},
|
||||
"metadata": {
|
||||
"version": "17"
|
||||
}
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"team": "user-experience",
|
||||
"flagSetId": "ecommerce"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -163,7 +156,7 @@ Example of an invalid configuration:
|
|||
`targeting` is an **optional** property.
|
||||
A targeting rule **must** be valid JSON.
|
||||
Flagd uses a modified version of [JsonLogic](https://jsonlogic.com/), as well as some custom pre-processing, to evaluate these rules.
|
||||
If no targeting rules are defined, the response reason will always be `STATIC`, this allows for the flag values to be cached, this behavior is described [here](specifications/providers.md#flag-evaluation-caching).
|
||||
If no targeting rules are defined, the response reason will always be `STATIC`, this allows for the flag values to be cached, this behavior is described [here](specifications/rpc-providers.md#caching).
|
||||
|
||||
#### Variants Returned From Targeting Rules
|
||||
|
||||
|
@ -191,9 +184,6 @@ For example, when accessing flagd via HTTP, the POST body may look like this:
|
|||
|
||||
The evaluation context can be accessed in targeting rules using the `var` operation followed by the evaluation context property name.
|
||||
|
||||
The evaluation context can be appended by arbitrary key value pairs
|
||||
via the `-X` command line flag.
|
||||
|
||||
| Description | Example |
|
||||
| -------------------------------------------------------------- | ---------------------------------------------------- |
|
||||
| Retrieve property from the evaluation context | `#!json { "var": "email" }` |
|
||||
|
@ -346,13 +336,6 @@ Example:
|
|||
}
|
||||
```
|
||||
|
||||
## Metadata
|
||||
|
||||
Metadata can be defined at both the flag set (as a sibling of [flags](#flags)) and within each flag.
|
||||
Flag metadata conveys arbitrary information about the flag or flag set, such as a version number, or the business unit that is responsible for the flag.
|
||||
When flagd resolves flags, the returned [flag metadata](https://openfeature.dev/specification/types/#flag-metadata) is a merged representation of the metadata defined in the flag set, and the metadata defined in the flag, with the metadata defined in the flag taking priority.
|
||||
See the [playground](/playground/?scenario-name=Flag+metadata) for an interactive example.
|
||||
|
||||
## Boolean Variant Shorthand
|
||||
|
||||
Since rules that return `true` or `false` map to the variant indexed by the equivalent string (`"true"`, `"false"`), you can use shorthand for these cases.
|
||||
|
|
|
@ -11,29 +11,20 @@ flagd start [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-H, --context-from-header stringToString add key-value pairs to map header values to context values, where key is Header name, value is context key (default [])
|
||||
-X, --context-value stringToString add arbitrary key value pairs to the flag evaluation context (default [])
|
||||
-C, --cors-origin strings CORS allowed origins, * will allow all origins
|
||||
--disable-sync-metadata Disables the getMetadata endpoint of the sync service. Defaults to false, but will default to true in later versions.
|
||||
-h, --help help for start
|
||||
-z, --log-format string Set the logging format, e.g. console or json (default "console")
|
||||
-m, --management-port int32 Port for management operations (default 8014)
|
||||
-t, --metrics-exporter string Set the metrics exporter. Default(if unset) is Prometheus. Can be override to otel - OpenTelemetry metric exporter. Overriding to otel require otelCollectorURI to be present
|
||||
-r, --ofrep-port int32 ofrep service port (default 8016)
|
||||
-A, --otel-ca-path string tls certificate authority path to use with OpenTelemetry collector
|
||||
-D, --otel-cert-path string tls certificate path to use with OpenTelemetry collector
|
||||
-o, --otel-collector-uri string Set the grpc URI of the OpenTelemetry collector for flagd runtime. If unset, the collector setup will be ignored and traces will not be exported.
|
||||
-K, --otel-key-path string tls key path to use with OpenTelemetry collector
|
||||
-I, --otel-reload-interval duration how long between reloading the otel tls certificate from disk (default 1h0m0s)
|
||||
-p, --port int32 Port to listen on (default 8013)
|
||||
-c, --server-cert-path string Server side tls certificate path
|
||||
-k, --server-key-path string Server side tls key path
|
||||
-d, --socket-path string Flagd unix socket path. With grpc the evaluations service will become available on this address. With http(s) the grpc-gateway proxy will use this address internally.
|
||||
-s, --sources string JSON representation of an array of SourceConfig objects. This object contains 2 required fields, uri (string) and provider (string). Documentation for this object: https://flagd.dev/reference/sync-configuration/#source-configuration
|
||||
--stream-deadline duration Set a server-side deadline for flagd sync and event streams (default 0, means no deadline).
|
||||
-g, --sync-port int32 gRPC Sync port (default 8015)
|
||||
-e, --sync-socket-path string Flagd sync service socket path. With grpc the sync service will be available on this address.
|
||||
-f, --uri .yaml/.yml/.json Set a sync provider uri to read data from, this can be a filepath, URL (HTTP and gRPC), FeatureFlag custom resource, or GCS or Azure Blob. When flag keys are duplicated across multiple providers the merge priority follows the index of the flag arguments, as such flags from the uri at index 0 take the lowest precedence, with duplicated keys being overwritten by those from the uri at index 1. Please note that if you are using filepath, flagd only supports files with .yaml/.yml/.json extension.
|
||||
-C, --cors-origin strings CORS allowed origins, * will allow all origins
|
||||
-h, --help help for start
|
||||
-z, --log-format string Set the logging format, e.g. console or json (default "console")
|
||||
-m, --management-port int32 Port for management operations (default 8014)
|
||||
-t, --metrics-exporter string Set the metrics exporter. Default(if unset) is Prometheus. Can be override to otel - OpenTelemetry metric exporter. Overriding to otel require otelCollectorURI to be present
|
||||
-r, --ofrep-port int32 ofrep service port (default 8016)
|
||||
-o, --otel-collector-uri string Set the grpc URI of the OpenTelemetry collector for flagd runtime. If unset, the collector setup will be ignored and traces will not be exported.
|
||||
-p, --port int32 Port to listen on (default 8013)
|
||||
-c, --server-cert-path string Server side tls certificate path
|
||||
-k, --server-key-path string Server side tls key path
|
||||
-d, --socket-path string Flagd socket path. With grpc the service will become available on this address. With http(s) the grpc-gateway proxy will use this address internally.
|
||||
-s, --sources string JSON representation of an array of SourceConfig objects. This object contains 2 required fields, uri (string) and provider (string). Documentation for this object: https://flagd.dev/reference/sync-configuration/#source-configuration
|
||||
-g, --sync-port int32 gRPC Sync port (default 8015)
|
||||
-f, --uri .yaml/.yml/.json Set a sync provider uri to read data from, this can be a filepath, URL (HTTP and gRPC) or FeatureFlag custom resource. When flag keys are duplicated across multiple providers the merge priority follows the index of the flag arguments, as such flags from the uri at index 0 take the lowest precedence, with duplicated keys being overwritten by those from the uri at index 1. Please note that if you are using filepath, flagd only supports files with .yaml/.yml/.json extension.
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
|
@ -45,33 +45,20 @@ Given below is the current implementation overview of flagd telemetry internals,
|
|||
|
||||
## Metrics
|
||||
|
||||
flagd exposes the following metrics:
|
||||
flagd expose following metrics,
|
||||
|
||||
- `http.server.request.duration` - Measures the duration of inbound HTTP requests
|
||||
- `http.server.response.body.size` - Measures the size of HTTP response messages
|
||||
- `http.server.active_requests` - Measures the number of concurrent HTTP requests that are currently in-flight
|
||||
- `feature_flag.flagd.impression` - Measures the number of evaluations for a given flag
|
||||
- `feature_flag.flagd.result.reason` - Measures the number of evaluations for a given reason
|
||||
|
||||
> Please note that metric names may vary based on the consuming monitoring tool naming requirements.
|
||||
> For example, the transformation of OTLP metrics to Prometheus is described [here](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus).
|
||||
|
||||
### HTTP Metric Attributes
|
||||
|
||||
flagd uses the following OpenTelemetry Semantic Conventions for HTTP metrics:
|
||||
|
||||
- `service.name` - The name of the service
|
||||
- `http.route` - The matched route (path template)
|
||||
- `http.request.method` - The HTTP request method (GET, POST, etc.)
|
||||
- `http.response.status_code` - The HTTP response status code
|
||||
- `url.scheme` - The URI scheme (http or https)
|
||||
- `http.server.duration`
|
||||
- `http.server.response.size`
|
||||
- `http.server.active_requests`
|
||||
- `feature_flag.flagd.impression`
|
||||
- `feature_flag.flagd.evaluation.reason`
|
||||
|
||||
## Traces
|
||||
|
||||
flagd creates the following spans as part of a trace:
|
||||
flagd expose following traces,
|
||||
|
||||
- `flagEvaluationService(resolveX)` - SpanKind server
|
||||
- `jsonEvaluator(resolveX)` - SpanKind internal
|
||||
- `jsonEvaluator(resolveX)` - SpanKind internal
|
||||
- `jsonEvaluator(setState)` - SpanKind internal
|
||||
|
||||
## Export to OTEL collector
|
||||
|
@ -92,9 +79,11 @@ official [OTEL collector example](https://github.com/open-telemetry/opentelemetr
|
|||
#### docker-compose.yaml
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
services:
|
||||
jaeger:
|
||||
image: cr.jaegertracing.io/jaegertracing/jaeger:2.8.0
|
||||
# Jaeger
|
||||
jaeger-all-in-one:
|
||||
image: jaegertracing/all-in-one:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "16686:16686"
|
||||
|
@ -102,7 +91,7 @@ services:
|
|||
- "14250"
|
||||
# Collector
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector:0.129.1
|
||||
image: otel/opentelemetry-collector:latest
|
||||
restart: always
|
||||
command: [ "--config=/etc/otel-collector-config.yaml" ]
|
||||
volumes:
|
||||
|
@ -115,10 +104,10 @@ services:
|
|||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
depends_on:
|
||||
- jaeger
|
||||
- jaeger-all-in-one
|
||||
prometheus:
|
||||
container_name: prometheus
|
||||
image: prom/prometheus:v2.53.5
|
||||
image: prom/prometheus:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./prometheus.yaml:/etc/prometheus/prometheus.yml
|
||||
|
@ -133,12 +122,13 @@ receivers:
|
|||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
exporters:
|
||||
prometheus:
|
||||
endpoint: "0.0.0.0:8889"
|
||||
otlp/jaeger:
|
||||
endpoint: jaeger:4317
|
||||
const_labels:
|
||||
label1: value1
|
||||
jaeger:
|
||||
endpoint: jaeger-all-in-one:14250
|
||||
tls:
|
||||
insecure: true
|
||||
processors:
|
||||
|
@ -148,14 +138,14 @@ service:
|
|||
traces:
|
||||
receivers: [ otlp ]
|
||||
processors: [ batch ]
|
||||
exporters: [ otlp/jaeger ]
|
||||
exporters: [ jaeger ]
|
||||
metrics:
|
||||
receivers: [ otlp ]
|
||||
processors: [ batch ]
|
||||
exporters: [ prometheus ]
|
||||
```
|
||||
|
||||
#### prometheus.yaml
|
||||
#### prometheus.yml
|
||||
|
||||
```yaml
|
||||
scrape_configs:
|
||||
|
@ -163,14 +153,8 @@ scrape_configs:
|
|||
scrape_interval: 10s
|
||||
static_configs:
|
||||
- targets: [ 'otel-collector:8889' ]
|
||||
- targets: [ 'otel-collector:8888' ]
|
||||
```
|
||||
|
||||
Once, configuration files are ready, use `docker compose up` to start the local setup. With successful startup, you can
|
||||
Once, configuration files are ready, use `docker-compose up` to start the local setup. With successful startup, you can
|
||||
access metrics through [Prometheus](http://localhost:9090/graph) & traces through [Jaeger](http://localhost:16686/).
|
||||
|
||||
## Metadata
|
||||
|
||||
[Flag metadata](https://openfeature.dev/specification/types/#flag-metadata) comprises auxiliary data pertaining to feature flags; it's highly valuable in telemetry signals.
|
||||
Flag metadata might consist of attributes indicating the version of the flag, an identifier for the flag set, ownership information about the flag, or other documentary information.
|
||||
flagd supports flag metadata in all its [gRPC protocols](../reference/specifications//protos.md), in [OFREP](../reference/flagd-ofrep.md), and in its [flag definitions](./flag-definitions.md#metadata).
|
||||
These attributes are returned with flag evaluations, and can be added to telemetry signals as outlined in the [OpenFeature specification](https://openfeature.dev/specification/appendix-d).
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# OpenFeature Operator
|
||||
|
||||
The OpenFeature Operator provides a convenient way to use flagd in your Kubernetes cluster.
|
||||
The OpenFeature Operator provides a convent way to using flagd in your Kubernetes cluster.
|
||||
It allows you to define feature flags as custom resources, inject flagd as a sidecar, and more.
|
||||
Please see the [installation guide](https://github.com/open-feature/open-feature-operator/blob/main/docs/installation.md) to get started.
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue