feature: add context to lock&pubsub API
Signed-off-by: seachen <seachen@tencent.com>
This commit is contained in:
commit
5dda098868
|
|
@ -51,6 +51,10 @@ resource cosmosDb 'Microsoft.DocumentDB/databaseAccounts@2021-04-15' = {
|
|||
'/partitionKey' // Defined by conformance test state.go
|
||||
]
|
||||
}
|
||||
// Based on https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/time-to-live,
|
||||
// if defaultTtl is not set, the item will never expire and also ttl is not enabled at all so
|
||||
// ttl set on a per item basis will not be honored.
|
||||
defaultTtl: -1 // only enable ttl
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,8 @@
|
|||
version: '2'
|
||||
services:
|
||||
redis:
|
||||
image: redis:7
|
||||
ports:
|
||||
- "6380:6379"
|
||||
environment:
|
||||
- REDIS_REPLICATION_MODE=master
|
||||
|
|
@ -57,7 +57,8 @@ jobs:
|
|||
- bindings.mqtt-mosquitto
|
||||
- bindings.mqtt-vernemq
|
||||
- bindings.postgres
|
||||
- bindings.redis
|
||||
- bindings.redis.v6
|
||||
- bindings.redis.v7
|
||||
- bindings.kubemq
|
||||
- bindings.rabbitmq
|
||||
- pubsub.aws.snssqs
|
||||
|
|
@ -69,7 +70,7 @@ jobs:
|
|||
- pubsub.natsstreaming
|
||||
- pubsub.pulsar
|
||||
- pubsub.rabbitmq
|
||||
- pubsub.redis
|
||||
- pubsub.redis.v6
|
||||
- pubsub.kafka-wurstmeister
|
||||
- pubsub.kafka-confluent
|
||||
- pubsub.kubemq
|
||||
|
|
@ -83,7 +84,8 @@ jobs:
|
|||
- state.mysql.mysql
|
||||
- state.mysql.mariadb
|
||||
- state.postgresql
|
||||
- state.redis
|
||||
- state.redis.v6
|
||||
- state.redis.v7
|
||||
- state.sqlserver
|
||||
- state.in-memory
|
||||
- state.cockroachdb
|
||||
|
|
@ -254,9 +256,13 @@ jobs:
|
|||
echo "$CERT_NAME=$CERT_FILE" >> $GITHUB_ENV
|
||||
done
|
||||
|
||||
- name: Start Redis
|
||||
- name: Start Redis 6 with Redis JSON
|
||||
run: docker-compose -f ./.github/infrastructure/docker-compose-redisjson.yml -p redis up -d
|
||||
if: contains(matrix.component, 'redis')
|
||||
if: contains(matrix.component, 'redis.v6')
|
||||
|
||||
- name: Start Redis 7
|
||||
run: docker-compose -f ./.github/infrastructure/docker-compose-redis7.yml -p redis up -d
|
||||
if: contains(matrix.component, 'redis.v7')
|
||||
|
||||
- name: Start Temporal
|
||||
run: docker-compose -f ./.github/infrastructure/docker-compose-temporal.yml -p temporal up -d
|
||||
|
|
|
|||
|
|
@ -15,11 +15,9 @@ package redis
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
rediscomponent "github.com/dapr/components-contrib/internal/component/redis"
|
||||
"github.com/dapr/kit/logger"
|
||||
|
|
@ -27,7 +25,7 @@ import (
|
|||
|
||||
// Redis is a redis output binding.
|
||||
type Redis struct {
|
||||
client redis.UniversalClient
|
||||
client rediscomponent.RedisClient
|
||||
clientSettings *rediscomponent.Settings
|
||||
logger logger.Logger
|
||||
|
||||
|
|
@ -49,7 +47,7 @@ func (r *Redis) Init(meta bindings.Metadata) (err error) {
|
|||
|
||||
r.ctx, r.cancel = context.WithCancel(context.Background())
|
||||
|
||||
_, err = r.client.Ping(r.ctx).Result()
|
||||
_, err = r.client.PingResult(r.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("redis binding: error connecting to redis at %s: %s", r.clientSettings.Host, err)
|
||||
}
|
||||
|
|
@ -58,7 +56,7 @@ func (r *Redis) Init(meta bindings.Metadata) (err error) {
|
|||
}
|
||||
|
||||
func (r *Redis) Ping() error {
|
||||
if _, err := r.client.Ping(r.ctx).Result(); err != nil {
|
||||
if _, err := r.client.PingResult(r.ctx); err != nil {
|
||||
return fmt.Errorf("redis binding: error connecting to redis at %s: %s", r.clientSettings.Host, err)
|
||||
}
|
||||
|
||||
|
|
@ -77,12 +75,12 @@ func (r *Redis) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindi
|
|||
if key, ok := req.Metadata["key"]; ok && key != "" {
|
||||
switch req.Operation {
|
||||
case bindings.DeleteOperation:
|
||||
err := r.client.Del(ctx, key).Err()
|
||||
err := r.client.Del(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case bindings.GetOperation:
|
||||
data, err := r.client.Get(ctx, key).Result()
|
||||
data, err := r.client.Get(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -90,7 +88,7 @@ func (r *Redis) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindi
|
|||
rep.Data = []byte(data)
|
||||
return rep, nil
|
||||
case bindings.CreateOperation:
|
||||
_, err := r.client.Do(ctx, "SET", key, req.Data).Result()
|
||||
err := r.client.DoWrite(ctx, "SET", key, req.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
internalredis "github.com/dapr/components-contrib/internal/component/redis"
|
||||
"github.com/dapr/kit/logger"
|
||||
)
|
||||
|
||||
|
|
@ -34,13 +35,14 @@ func TestInvokeCreate(t *testing.T) {
|
|||
s, c := setupMiniredis()
|
||||
defer s.Close()
|
||||
|
||||
// miniRedis is compatible with the existing v8 client
|
||||
bind := &Redis{
|
||||
client: c,
|
||||
logger: logger.NewLogger("test"),
|
||||
}
|
||||
bind.ctx, bind.cancel = context.WithCancel(context.Background())
|
||||
|
||||
_, err := c.Do(context.Background(), "GET", testKey).Result()
|
||||
_, err := c.DoRead(context.Background(), "GET", testKey)
|
||||
assert.Equal(t, redis.Nil, err)
|
||||
|
||||
bindingRes, err := bind.Invoke(context.TODO(), &bindings.InvokeRequest{
|
||||
|
|
@ -51,7 +53,7 @@ func TestInvokeCreate(t *testing.T) {
|
|||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, true, bindingRes == nil)
|
||||
|
||||
getRes, err := c.Do(context.Background(), "GET", testKey).Result()
|
||||
getRes, err := c.DoRead(context.Background(), "GET", testKey)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, true, getRes == testData)
|
||||
}
|
||||
|
|
@ -66,7 +68,7 @@ func TestInvokeGet(t *testing.T) {
|
|||
}
|
||||
bind.ctx, bind.cancel = context.WithCancel(context.Background())
|
||||
|
||||
_, err := c.Do(context.Background(), "SET", testKey, testData).Result()
|
||||
err := c.DoWrite(context.Background(), "SET", testKey, testData)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
bindingRes, err := bind.Invoke(context.TODO(), &bindings.InvokeRequest{
|
||||
|
|
@ -87,10 +89,10 @@ func TestInvokeDelete(t *testing.T) {
|
|||
}
|
||||
bind.ctx, bind.cancel = context.WithCancel(context.Background())
|
||||
|
||||
_, err := c.Do(context.Background(), "SET", testKey, testData).Result()
|
||||
err := c.DoWrite(context.Background(), "SET", testKey, testData)
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
getRes, err := c.Do(context.Background(), "GET", testKey).Result()
|
||||
getRes, err := c.DoRead(context.Background(), "GET", testKey)
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, true, getRes == testData)
|
||||
|
||||
|
|
@ -101,12 +103,12 @@ func TestInvokeDelete(t *testing.T) {
|
|||
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
rgetRep, err := c.Do(context.Background(), "GET", testKey).Result()
|
||||
rgetRep, err := c.DoRead(context.Background(), "GET", testKey)
|
||||
assert.Equal(t, redis.Nil, err)
|
||||
assert.Equal(t, nil, rgetRep)
|
||||
}
|
||||
|
||||
func setupMiniredis() (*miniredis.Miniredis, *redis.Client) {
|
||||
func setupMiniredis() (*miniredis.Miniredis, internalredis.RedisClient) {
|
||||
s, err := miniredis.Run()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
@ -116,5 +118,5 @@ func setupMiniredis() (*miniredis.Miniredis, *redis.Client) {
|
|||
DB: 0,
|
||||
}
|
||||
|
||||
return s, redis.NewClient(opts)
|
||||
return s, internalredis.ClientFromV8Client(redis.NewClient(opts))
|
||||
}
|
||||
|
|
|
|||
2
go.mod
2
go.mod
|
|
@ -59,6 +59,7 @@ require (
|
|||
github.com/fasthttp-contrib/sessions v0.0.0-20160905201309-74f6ac73d5d5
|
||||
github.com/ghodss/yaml v1.0.0
|
||||
github.com/go-redis/redis/v8 v8.11.5
|
||||
github.com/go-redis/redis/v9 v9.0.0-rc.2
|
||||
github.com/go-sql-driver/mysql v1.7.0
|
||||
github.com/gocql/gocql v1.3.1
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3
|
||||
|
|
@ -111,6 +112,7 @@ require (
|
|||
go.uber.org/atomic v1.10.0
|
||||
go.uber.org/ratelimit v0.2.0
|
||||
golang.org/x/crypto v0.4.0
|
||||
golang.org/x/mod v0.6.0
|
||||
golang.org/x/net v0.4.0
|
||||
golang.org/x/oauth2 v0.3.0
|
||||
google.golang.org/api v0.104.0
|
||||
|
|
|
|||
6
go.sum
6
go.sum
|
|
@ -773,6 +773,8 @@ github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2B
|
|||
github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
|
||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
|
||||
github.com/go-redis/redis/v9 v9.0.0-rc.2 h1:IN1eI8AvJJeWHjMW/hlFAv2sAfvTun2DVksDDJ3a6a0=
|
||||
github.com/go-redis/redis/v9 v9.0.0-rc.2/go.mod h1:cgBknjwcBJa2prbnuHH/4k/Mlj4r0pWNV2HBanHujfY=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
|
|
@ -1366,7 +1368,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
|
|||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
|
||||
github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/open-policy-agent/opa v0.47.3 h1:Uj8zw+q6Cvv1iiQFh704Q6sl3fKVvk35WZNJLsd6mgk=
|
||||
github.com/open-policy-agent/opa v0.47.3/go.mod h1:I5DbT677OGqfk9gvu5i54oIt0rrVf4B5pedpqDquAXo=
|
||||
|
|
@ -1848,6 +1850,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ func CreateContainerStorageClient(log logger.Logger, meta map[string]string) (*c
|
|||
// Try using shared key credentials first
|
||||
if m.AccountKey != "" {
|
||||
credential, newSharedKeyErr := azblob.NewSharedKeyCredential(m.AccountName, m.AccountKey)
|
||||
if err != nil {
|
||||
if newSharedKeyErr != nil {
|
||||
return nil, nil, fmt.Errorf("invalid shared key credentials with error: %w", newSharedKeyErr)
|
||||
}
|
||||
client, clientErr = container.NewClientWithSharedKeyCredential(URL.String(), credential, &options)
|
||||
|
|
@ -88,7 +88,7 @@ func CreateContainerStorageClient(log logger.Logger, meta map[string]string) (*c
|
|||
} else {
|
||||
// fallback to AAD
|
||||
credential, tokenErr := settings.GetTokenCredential()
|
||||
if err != nil {
|
||||
if tokenErr != nil {
|
||||
return nil, nil, fmt.Errorf("invalid token credentials with error: %w", tokenErr)
|
||||
}
|
||||
client, clientErr = container.NewClient(URL.String(), credential, &options)
|
||||
|
|
|
|||
|
|
@ -14,12 +14,14 @@ limitations under the License.
|
|||
package redis
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/dapr/kit/ptr"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -27,7 +29,53 @@ const (
|
|||
NodeType = "node"
|
||||
)
|
||||
|
||||
func ParseClientFromProperties(properties map[string]string, defaultSettings *Settings) (client redis.UniversalClient, settings *Settings, err error) {
|
||||
type RedisXMessage struct {
|
||||
ID string
|
||||
Values map[string]interface{}
|
||||
}
|
||||
|
||||
type RedisXStream struct {
|
||||
Stream string
|
||||
Messages []RedisXMessage
|
||||
}
|
||||
|
||||
type RedisXPendingExt struct {
|
||||
ID string
|
||||
Consumer string
|
||||
Idle time.Duration
|
||||
RetryCount int64
|
||||
}
|
||||
|
||||
type RedisPipeliner interface {
|
||||
Exec(ctx context.Context) error
|
||||
Do(ctx context.Context, args ...interface{})
|
||||
}
|
||||
|
||||
var clientHasJSONSupport *bool
|
||||
|
||||
//nolint:interfacebloat
|
||||
type RedisClient interface {
|
||||
GetNilValueError() RedisError
|
||||
Context() context.Context
|
||||
DoRead(ctx context.Context, args ...interface{}) (interface{}, error)
|
||||
DoWrite(ctx context.Context, args ...interface{}) error
|
||||
Del(ctx context.Context, keys ...string) error
|
||||
Get(ctx context.Context, key string) (string, error)
|
||||
Close() error
|
||||
PingResult(ctx context.Context) (string, error)
|
||||
SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) (*bool, error)
|
||||
EvalInt(ctx context.Context, script string, keys []string, args ...interface{}) (*int, error, error)
|
||||
XAdd(ctx context.Context, stream string, maxLenApprox int64, values map[string]interface{}) (string, error)
|
||||
XGroupCreateMkStream(ctx context.Context, stream string, group string, start string) error
|
||||
XAck(ctx context.Context, stream string, group string, messageID string) error
|
||||
XReadGroupResult(ctx context.Context, group string, consumer string, streams []string, count int64, block time.Duration) ([]RedisXStream, error)
|
||||
XPendingExtResult(ctx context.Context, stream string, group string, start string, end string, count int64) ([]RedisXPendingExt, error)
|
||||
XClaimResult(ctx context.Context, stream string, group string, consumer string, minIdleTime time.Duration, messageIDs []string) ([]RedisXMessage, error)
|
||||
TxPipeline() RedisPipeliner
|
||||
TTLResult(ctx context.Context, key string) (time.Duration, error)
|
||||
}
|
||||
|
||||
func ParseClientFromProperties(properties map[string]string, defaultSettings *Settings) (client RedisClient, settings *Settings, err error) {
|
||||
if defaultSettings == nil {
|
||||
settings = &Settings{}
|
||||
} else {
|
||||
|
|
@ -37,110 +85,79 @@ func ParseClientFromProperties(properties map[string]string, defaultSettings *Se
|
|||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("redis client configuration error: %w", err)
|
||||
}
|
||||
|
||||
var c RedisClient
|
||||
if settings.Failover {
|
||||
return newFailoverClient(settings), settings, nil
|
||||
c = newV8FailoverClient(settings)
|
||||
} else {
|
||||
c = newV8Client(settings)
|
||||
}
|
||||
version, versionErr := GetServerVersion(c)
|
||||
c.Close() // close the client to avoid leaking connections
|
||||
|
||||
return newClient(settings), settings, nil
|
||||
useNewClient := false
|
||||
if versionErr != nil {
|
||||
// we couldn't query the server version, so we will assume the v8 client is not supported
|
||||
useNewClient = true
|
||||
} else if semver.Compare("v"+version, "v7.0.0") > -1 {
|
||||
// if the server version is >= 7, we will use the v9 client
|
||||
useNewClient = true
|
||||
}
|
||||
if useNewClient {
|
||||
if settings.Failover {
|
||||
return newV9FailoverClient(settings), settings, nil
|
||||
}
|
||||
return newV9Client(settings), settings, nil
|
||||
} else {
|
||||
if settings.Failover {
|
||||
return newV8FailoverClient(settings), settings, nil
|
||||
}
|
||||
return newV8Client(settings), settings, nil
|
||||
}
|
||||
}
|
||||
|
||||
func newFailoverClient(s *Settings) redis.UniversalClient {
|
||||
if s == nil {
|
||||
return nil
|
||||
func ClientHasJSONSupport(c RedisClient) bool {
|
||||
if clientHasJSONSupport != nil {
|
||||
return *clientHasJSONSupport
|
||||
}
|
||||
opts := &redis.FailoverOptions{
|
||||
DB: s.DB,
|
||||
MasterName: s.SentinelMasterName,
|
||||
SentinelAddrs: []string{s.Host},
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
MaxConnAge: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
IdleCheckFrequency: time.Duration(s.IdleCheckFrequency),
|
||||
IdleTimeout: time.Duration(s.IdleTimeout),
|
||||
bgctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(bgctx, 5*time.Second)
|
||||
defer cancel()
|
||||
err := c.DoWrite(ctx, "JSON.GET")
|
||||
|
||||
if err == nil {
|
||||
clientHasJSONSupport = ptr.Of(true)
|
||||
return true
|
||||
}
|
||||
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
opts.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
if strings.HasPrefix(err.Error(), "ERR unknown command") {
|
||||
clientHasJSONSupport = ptr.Of(false)
|
||||
return false
|
||||
}
|
||||
|
||||
if s.RedisType == ClusterType {
|
||||
opts.SentinelAddrs = strings.Split(s.Host, ",")
|
||||
|
||||
return redis.NewFailoverClusterClient(opts)
|
||||
}
|
||||
|
||||
return redis.NewFailoverClient(opts)
|
||||
clientHasJSONSupport = ptr.Of(true)
|
||||
return true
|
||||
}
|
||||
|
||||
func newClient(s *Settings) redis.UniversalClient {
|
||||
if s == nil {
|
||||
return nil
|
||||
func GetServerVersion(c RedisClient) (string, error) {
|
||||
bgctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(bgctx, 5*time.Second)
|
||||
defer cancel()
|
||||
res, err := c.DoRead(ctx, "INFO", "server")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if s.RedisType == ClusterType {
|
||||
options := &redis.ClusterOptions{
|
||||
Addrs: strings.Split(s.Host, ","),
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
MaxConnAge: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
IdleCheckFrequency: time.Duration(s.IdleCheckFrequency),
|
||||
IdleTimeout: time.Duration(s.IdleTimeout),
|
||||
}
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
options.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
return redis.NewClusterClient(options)
|
||||
}
|
||||
|
||||
options := &redis.Options{
|
||||
Addr: s.Host,
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
DB: s.DB,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
MaxConnAge: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
IdleCheckFrequency: time.Duration(s.IdleCheckFrequency),
|
||||
IdleTimeout: time.Duration(s.IdleTimeout),
|
||||
}
|
||||
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
options.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
// get row in string res beginning with "redis_version"
|
||||
rows := strings.Split(res.(string), "\n")
|
||||
for _, row := range rows {
|
||||
if strings.HasPrefix(row, "redis_version:") {
|
||||
return strings.TrimSpace(strings.Split(row, ":")[1]), nil
|
||||
}
|
||||
}
|
||||
|
||||
return redis.NewClient(options)
|
||||
return "", fmt.Errorf("could not find redis_version in redis info response")
|
||||
}
|
||||
|
||||
type RedisError string
|
||||
|
||||
func (e RedisError) Error() string { return string(e) }
|
||||
|
||||
func (RedisError) RedisError() {}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,414 @@
|
|||
/*
|
||||
Copyright 2021 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v8 "github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
type v8Pipeliner struct {
|
||||
pipeliner v8.Pipeliner
|
||||
writeTimeout Duration
|
||||
}
|
||||
|
||||
func (p v8Pipeliner) Exec(ctx context.Context) error {
|
||||
_, err := p.pipeliner.Exec(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p v8Pipeliner) Do(ctx context.Context, args ...interface{}) {
|
||||
if p.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(p.writeTimeout))
|
||||
defer cancel()
|
||||
p.pipeliner.Do(timeoutCtx, args...)
|
||||
}
|
||||
p.pipeliner.Do(ctx, args...)
|
||||
}
|
||||
|
||||
// v8Client is an interface implementation of RedisClient
|
||||
|
||||
type v8Client struct {
|
||||
client v8.UniversalClient
|
||||
readTimeout Duration
|
||||
writeTimeout Duration
|
||||
dialTimeout Duration
|
||||
}
|
||||
|
||||
func (c v8Client) DoWrite(ctx context.Context, args ...interface{}) error {
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
return c.client.Do(timeoutCtx, args...).Err()
|
||||
}
|
||||
return c.client.Do(ctx, args...).Err()
|
||||
}
|
||||
|
||||
func (c v8Client) DoRead(ctx context.Context, args ...interface{}) (interface{}, error) {
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
return c.client.Do(timeoutCtx, args...).Result()
|
||||
}
|
||||
return c.client.Do(ctx, args...).Result()
|
||||
}
|
||||
|
||||
func (c v8Client) Del(ctx context.Context, keys ...string) error {
|
||||
err := c.client.Del(ctx, keys...).Err()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c v8Client) Get(ctx context.Context, key string) (string, error) {
|
||||
return c.client.Get(ctx, key).Result()
|
||||
}
|
||||
|
||||
func (c v8Client) GetNilValueError() RedisError {
|
||||
return RedisError(v8.Nil.Error())
|
||||
}
|
||||
|
||||
func (c v8Client) Context() context.Context {
|
||||
return c.client.Context()
|
||||
}
|
||||
|
||||
func (c v8Client) Close() error {
|
||||
return c.client.Close()
|
||||
}
|
||||
|
||||
func (c v8Client) PingResult(ctx context.Context) (string, error) {
|
||||
if c.dialTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.dialTimeout))
|
||||
defer cancel()
|
||||
return c.client.Ping(timeoutCtx).Result()
|
||||
}
|
||||
return c.client.Ping(ctx).Result()
|
||||
}
|
||||
|
||||
func (c v8Client) EvalInt(ctx context.Context, script string, keys []string, args ...interface{}) (*int, error, error) {
|
||||
var evalCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
evalCtx = timeoutCtx
|
||||
} else {
|
||||
evalCtx = ctx
|
||||
}
|
||||
eval := c.client.Eval(evalCtx, script, keys, args...)
|
||||
if eval == nil {
|
||||
return nil, nil, nil
|
||||
}
|
||||
i, err := eval.Int()
|
||||
return &i, err, eval.Err()
|
||||
}
|
||||
|
||||
func (c v8Client) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) (*bool, error) {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
nx := c.client.SetNX(writeCtx, key, value, expiration)
|
||||
if nx == nil {
|
||||
return nil, nil
|
||||
}
|
||||
val := nx.Val()
|
||||
return &val, nx.Err()
|
||||
}
|
||||
|
||||
func (c v8Client) XAdd(ctx context.Context, stream string, maxLenApprox int64, values map[string]interface{}) (string, error) {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
return c.client.XAdd(writeCtx, &v8.XAddArgs{
|
||||
Stream: stream,
|
||||
Values: values,
|
||||
MaxLenApprox: maxLenApprox,
|
||||
}).Result()
|
||||
}
|
||||
|
||||
func (c v8Client) XGroupCreateMkStream(ctx context.Context, stream string, group string, start string) error {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
return c.client.XGroupCreateMkStream(writeCtx, stream, group, start).Err()
|
||||
}
|
||||
|
||||
func (c v8Client) XAck(ctx context.Context, stream string, group string, messageID string) error {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
ack := c.client.XAck(readCtx, stream, group, messageID)
|
||||
return ack.Err()
|
||||
}
|
||||
|
||||
func (c v8Client) XReadGroupResult(ctx context.Context, group string, consumer string, streams []string, count int64, block time.Duration) ([]RedisXStream, error) {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
res, err := c.client.XReadGroup(readCtx,
|
||||
&v8.XReadGroupArgs{
|
||||
Group: group,
|
||||
Consumer: consumer,
|
||||
Streams: streams,
|
||||
Count: count,
|
||||
Block: block,
|
||||
},
|
||||
).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert []v8.XStream to []RedisXStream
|
||||
redisXStreams := make([]RedisXStream, len(res))
|
||||
for i, xStream := range res {
|
||||
redisXStreams[i].Stream = xStream.Stream
|
||||
redisXStreams[i].Messages = make([]RedisXMessage, len(xStream.Messages))
|
||||
for j, message := range xStream.Messages {
|
||||
redisXStreams[i].Messages[j].ID = message.ID
|
||||
redisXStreams[i].Messages[j].Values = message.Values
|
||||
}
|
||||
}
|
||||
|
||||
return redisXStreams, nil
|
||||
}
|
||||
|
||||
func (c v8Client) XPendingExtResult(ctx context.Context, stream string, group string, start string, end string, count int64) ([]RedisXPendingExt, error) {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
res, err := c.client.XPendingExt(readCtx, &v8.XPendingExtArgs{
|
||||
Stream: stream,
|
||||
Group: group,
|
||||
Start: start,
|
||||
End: end,
|
||||
Count: count,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert []v8.XPendingExt to []RedisXPendingExt
|
||||
redisXPendingExts := make([]RedisXPendingExt, len(res))
|
||||
for i, xPendingExt := range res {
|
||||
redisXPendingExts[i] = RedisXPendingExt(xPendingExt)
|
||||
}
|
||||
return redisXPendingExts, nil
|
||||
}
|
||||
|
||||
func (c v8Client) XClaimResult(ctx context.Context, stream string, group string, consumer string, minIdleTime time.Duration, messageIDs []string) ([]RedisXMessage, error) {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
res, err := c.client.XClaim(readCtx, &v8.XClaimArgs{
|
||||
Stream: stream,
|
||||
Group: group,
|
||||
Consumer: consumer,
|
||||
MinIdle: minIdleTime,
|
||||
Messages: messageIDs,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert res to []RedisXMessage
|
||||
redisXMessages := make([]RedisXMessage, len(res))
|
||||
for i, xMessage := range res {
|
||||
redisXMessages[i] = RedisXMessage(xMessage)
|
||||
}
|
||||
|
||||
return redisXMessages, nil
|
||||
}
|
||||
|
||||
func (c v8Client) TxPipeline() RedisPipeliner {
|
||||
return v8Pipeliner{
|
||||
pipeliner: c.client.TxPipeline(),
|
||||
writeTimeout: c.writeTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (c v8Client) TTLResult(ctx context.Context, key string) (time.Duration, error) {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
return c.client.TTL(writeCtx, key).Result()
|
||||
}
|
||||
|
||||
func newV8FailoverClient(s *Settings) RedisClient {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
opts := &v8.FailoverOptions{
|
||||
DB: s.DB,
|
||||
MasterName: s.SentinelMasterName,
|
||||
SentinelAddrs: []string{s.Host},
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
MaxConnAge: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
IdleCheckFrequency: time.Duration(s.IdleCheckFrequency),
|
||||
IdleTimeout: time.Duration(s.IdleTimeout),
|
||||
}
|
||||
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
opts.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
if s.RedisType == ClusterType {
|
||||
opts.SentinelAddrs = strings.Split(s.Host, ",")
|
||||
|
||||
return v8Client{
|
||||
client: v8.NewFailoverClusterClient(opts),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
return v8Client{
|
||||
client: v8.NewFailoverClient(opts),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func newV8Client(s *Settings) RedisClient {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
if s.RedisType == ClusterType {
|
||||
options := &v8.ClusterOptions{
|
||||
Addrs: strings.Split(s.Host, ","),
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
MaxConnAge: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
IdleCheckFrequency: time.Duration(s.IdleCheckFrequency),
|
||||
IdleTimeout: time.Duration(s.IdleTimeout),
|
||||
}
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
options.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
return v8Client{
|
||||
client: v8.NewClusterClient(options),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
options := &v8.Options{
|
||||
Addr: s.Host,
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
DB: s.DB,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
MaxConnAge: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
IdleCheckFrequency: time.Duration(s.IdleCheckFrequency),
|
||||
IdleTimeout: time.Duration(s.IdleTimeout),
|
||||
}
|
||||
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
options.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
return v8Client{
|
||||
client: v8.NewClient(options),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func ClientFromV8Client(client v8.UniversalClient) RedisClient {
|
||||
return v8Client{client: client}
|
||||
}
|
||||
|
|
@ -0,0 +1,410 @@
|
|||
/*
|
||||
Copyright 2021 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v9 "github.com/go-redis/redis/v9"
|
||||
)
|
||||
|
||||
type v9Pipeliner struct {
|
||||
pipeliner v9.Pipeliner
|
||||
writeTimeout Duration
|
||||
}
|
||||
|
||||
func (p v9Pipeliner) Exec(ctx context.Context) error {
|
||||
_, err := p.pipeliner.Exec(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p v9Pipeliner) Do(ctx context.Context, args ...interface{}) {
|
||||
if p.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(p.writeTimeout))
|
||||
defer cancel()
|
||||
p.pipeliner.Do(timeoutCtx, args...)
|
||||
}
|
||||
p.pipeliner.Do(ctx, args...)
|
||||
}
|
||||
|
||||
// v9Client is an interface implementation of RedisClient
|
||||
|
||||
type v9Client struct {
|
||||
client v9.UniversalClient
|
||||
readTimeout Duration
|
||||
writeTimeout Duration
|
||||
dialTimeout Duration
|
||||
}
|
||||
|
||||
func (c v9Client) DoWrite(ctx context.Context, args ...interface{}) error {
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
return c.client.Do(timeoutCtx, args...).Err()
|
||||
}
|
||||
return c.client.Do(ctx, args...).Err()
|
||||
}
|
||||
|
||||
func (c v9Client) DoRead(ctx context.Context, args ...interface{}) (interface{}, error) {
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
return c.client.Do(timeoutCtx, args...).Result()
|
||||
}
|
||||
return c.client.Do(ctx, args...).Result()
|
||||
}
|
||||
|
||||
func (c v9Client) Del(ctx context.Context, keys ...string) error {
|
||||
err := c.client.Del(ctx, keys...).Err()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c v9Client) Get(ctx context.Context, key string) (string, error) {
|
||||
return c.client.Get(ctx, key).Result()
|
||||
}
|
||||
|
||||
func (c v9Client) GetNilValueError() RedisError {
|
||||
return RedisError(v9.Nil.Error())
|
||||
}
|
||||
|
||||
func (c v9Client) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (c v9Client) Close() error {
|
||||
return c.client.Close()
|
||||
}
|
||||
|
||||
func (c v9Client) PingResult(ctx context.Context) (string, error) {
|
||||
if c.dialTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.dialTimeout))
|
||||
defer cancel()
|
||||
return c.client.Ping(timeoutCtx).Result()
|
||||
}
|
||||
return c.client.Ping(ctx).Result()
|
||||
}
|
||||
|
||||
func (c v9Client) EvalInt(ctx context.Context, script string, keys []string, args ...interface{}) (*int, error, error) {
|
||||
var evalCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
evalCtx = timeoutCtx
|
||||
} else {
|
||||
evalCtx = ctx
|
||||
}
|
||||
eval := c.client.Eval(evalCtx, script, keys, args...)
|
||||
if eval == nil {
|
||||
return nil, nil, nil
|
||||
}
|
||||
i, err := eval.Int()
|
||||
return &i, err, eval.Err()
|
||||
}
|
||||
|
||||
func (c v9Client) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) (*bool, error) {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
nx := c.client.SetNX(writeCtx, key, value, expiration)
|
||||
if nx == nil {
|
||||
return nil, nil
|
||||
}
|
||||
val := nx.Val()
|
||||
return &val, nx.Err()
|
||||
}
|
||||
|
||||
func (c v9Client) XAdd(ctx context.Context, stream string, maxLenApprox int64, values map[string]interface{}) (string, error) {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
return c.client.XAdd(writeCtx, &v9.XAddArgs{
|
||||
Stream: stream,
|
||||
Values: values,
|
||||
MaxLen: maxLenApprox,
|
||||
}).Result()
|
||||
}
|
||||
|
||||
func (c v9Client) XGroupCreateMkStream(ctx context.Context, stream string, group string, start string) error {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
return c.client.XGroupCreateMkStream(writeCtx, stream, group, start).Err()
|
||||
}
|
||||
|
||||
func (c v9Client) XAck(ctx context.Context, stream string, group string, messageID string) error {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
ack := c.client.XAck(readCtx, stream, group, messageID)
|
||||
return ack.Err()
|
||||
}
|
||||
|
||||
func (c v9Client) XReadGroupResult(ctx context.Context, group string, consumer string, streams []string, count int64, block time.Duration) ([]RedisXStream, error) {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
res, err := c.client.XReadGroup(readCtx,
|
||||
&v9.XReadGroupArgs{
|
||||
Group: group,
|
||||
Consumer: consumer,
|
||||
Streams: streams,
|
||||
Count: count,
|
||||
Block: block,
|
||||
},
|
||||
).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert []v9.XStream to []RedisXStream
|
||||
redisXStreams := make([]RedisXStream, len(res))
|
||||
for i, xStream := range res {
|
||||
redisXStreams[i].Stream = xStream.Stream
|
||||
redisXStreams[i].Messages = make([]RedisXMessage, len(xStream.Messages))
|
||||
for j, message := range xStream.Messages {
|
||||
redisXStreams[i].Messages[j].ID = message.ID
|
||||
redisXStreams[i].Messages[j].Values = message.Values
|
||||
}
|
||||
}
|
||||
|
||||
return redisXStreams, nil
|
||||
}
|
||||
|
||||
func (c v9Client) XPendingExtResult(ctx context.Context, stream string, group string, start string, end string, count int64) ([]RedisXPendingExt, error) {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
res, err := c.client.XPendingExt(readCtx, &v9.XPendingExtArgs{
|
||||
Stream: stream,
|
||||
Group: group,
|
||||
Start: start,
|
||||
End: end,
|
||||
Count: count,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert []v9.XPendingExt to []RedisXPendingExt
|
||||
redisXPendingExts := make([]RedisXPendingExt, len(res))
|
||||
for i, xPendingExt := range res {
|
||||
redisXPendingExts[i] = RedisXPendingExt(xPendingExt)
|
||||
}
|
||||
return redisXPendingExts, nil
|
||||
}
|
||||
|
||||
func (c v9Client) XClaimResult(ctx context.Context, stream string, group string, consumer string, minIdleTime time.Duration, messageIDs []string) ([]RedisXMessage, error) {
|
||||
var readCtx context.Context
|
||||
if c.readTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.readTimeout))
|
||||
defer cancel()
|
||||
readCtx = timeoutCtx
|
||||
} else {
|
||||
readCtx = ctx
|
||||
}
|
||||
res, err := c.client.XClaim(readCtx, &v9.XClaimArgs{
|
||||
Stream: stream,
|
||||
Group: group,
|
||||
Consumer: consumer,
|
||||
MinIdle: minIdleTime,
|
||||
Messages: messageIDs,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert res to []RedisXMessage
|
||||
redisXMessages := make([]RedisXMessage, len(res))
|
||||
for i, xMessage := range res {
|
||||
redisXMessages[i] = RedisXMessage(xMessage)
|
||||
}
|
||||
|
||||
return redisXMessages, nil
|
||||
}
|
||||
|
||||
func (c v9Client) TxPipeline() RedisPipeliner {
|
||||
return v9Pipeliner{
|
||||
pipeliner: c.client.TxPipeline(),
|
||||
writeTimeout: c.writeTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (c v9Client) TTLResult(ctx context.Context, key string) (time.Duration, error) {
|
||||
var writeCtx context.Context
|
||||
if c.writeTimeout > 0 {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(c.writeTimeout))
|
||||
defer cancel()
|
||||
writeCtx = timeoutCtx
|
||||
} else {
|
||||
writeCtx = ctx
|
||||
}
|
||||
return c.client.TTL(writeCtx, key).Result()
|
||||
}
|
||||
|
||||
func newV9FailoverClient(s *Settings) RedisClient {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
opts := &v9.FailoverOptions{
|
||||
DB: s.DB,
|
||||
MasterName: s.SentinelMasterName,
|
||||
SentinelAddrs: []string{s.Host},
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
ConnMaxLifetime: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
ConnMaxIdleTime: time.Duration(s.IdleTimeout),
|
||||
ContextTimeoutEnabled: true,
|
||||
}
|
||||
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
opts.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
if s.RedisType == ClusterType {
|
||||
opts.SentinelAddrs = strings.Split(s.Host, ",")
|
||||
|
||||
return v9Client{
|
||||
client: v9.NewFailoverClusterClient(opts),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
return v9Client{
|
||||
client: v9.NewFailoverClient(opts),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func newV9Client(s *Settings) RedisClient {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
if s.RedisType == ClusterType {
|
||||
options := &v9.ClusterOptions{
|
||||
Addrs: strings.Split(s.Host, ","),
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
ConnMaxLifetime: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
ConnMaxIdleTime: time.Duration(s.IdleTimeout),
|
||||
ContextTimeoutEnabled: true,
|
||||
}
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
options.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
return v9Client{
|
||||
client: v9.NewClusterClient(options),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
options := &v9.Options{
|
||||
Addr: s.Host,
|
||||
Password: s.Password,
|
||||
Username: s.Username,
|
||||
DB: s.DB,
|
||||
MaxRetries: s.RedisMaxRetries,
|
||||
MaxRetryBackoff: time.Duration(s.RedisMaxRetryInterval),
|
||||
MinRetryBackoff: time.Duration(s.RedisMinRetryInterval),
|
||||
DialTimeout: time.Duration(s.DialTimeout),
|
||||
ReadTimeout: time.Duration(s.ReadTimeout),
|
||||
WriteTimeout: time.Duration(s.WriteTimeout),
|
||||
PoolSize: s.PoolSize,
|
||||
ConnMaxLifetime: time.Duration(s.MaxConnAge),
|
||||
MinIdleConns: s.MinIdleConns,
|
||||
PoolTimeout: time.Duration(s.PoolTimeout),
|
||||
ConnMaxIdleTime: time.Duration(s.IdleTimeout),
|
||||
ContextTimeoutEnabled: true,
|
||||
}
|
||||
|
||||
/* #nosec */
|
||||
if s.EnableTLS {
|
||||
options.TLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: s.EnableTLS,
|
||||
}
|
||||
}
|
||||
|
||||
return v9Client{
|
||||
client: v9.NewClient(options),
|
||||
readTimeout: s.ReadTimeout,
|
||||
writeTimeout: s.WriteTimeout,
|
||||
dialTimeout: s.DialTimeout,
|
||||
}
|
||||
}
|
||||
|
|
@ -20,8 +20,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
|
||||
rediscomponent "github.com/dapr/components-contrib/internal/component/redis"
|
||||
"github.com/dapr/components-contrib/lock"
|
||||
"github.com/dapr/kit/logger"
|
||||
|
|
@ -35,7 +33,7 @@ const (
|
|||
|
||||
// Standalone Redis lock store.Any fail-over related features are not supported,such as Sentinel and Redis Cluster.
|
||||
type StandaloneRedisLock struct {
|
||||
client redis.UniversalClient
|
||||
client rediscomponent.RedisClient
|
||||
clientSettings *rediscomponent.Settings
|
||||
metadata rediscomponent.Metadata
|
||||
|
||||
|
|
@ -79,7 +77,7 @@ func (r *StandaloneRedisLock) InitLockStore(metadata lock.Metadata) error {
|
|||
}
|
||||
r.ctx, r.cancel = context.WithCancel(context.Background())
|
||||
// 3. connect to redis
|
||||
if _, err = r.client.Ping(r.ctx).Result(); err != nil {
|
||||
if _, err = r.client.PingResult(r.ctx); err != nil {
|
||||
return fmt.Errorf("[standaloneRedisLock]: error connecting to redis at %s: %s", r.clientSettings.Host, err)
|
||||
}
|
||||
// no replica
|
||||
|
|
@ -104,7 +102,7 @@ func needFailover(properties map[string]string) bool {
|
|||
}
|
||||
|
||||
func (r *StandaloneRedisLock) getConnectedSlaves() (int, error) {
|
||||
res, err := r.client.Do(r.ctx, "INFO", "replication").Result()
|
||||
res, err := r.client.DoRead(r.ctx, "INFO", "replication")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
|
@ -135,37 +133,32 @@ func (r *StandaloneRedisLock) parseConnectedSlaves(res string) int {
|
|||
// Try to acquire a redis lock.
|
||||
func (r *StandaloneRedisLock) TryLock(ctx context.Context, req *lock.TryLockRequest) (*lock.TryLockResponse, error) {
|
||||
// 1.Setting redis expiration time
|
||||
nx := r.client.SetNX(ctx, req.ResourceID, req.LockOwner, time.Second*time.Duration(req.ExpiryInSeconds))
|
||||
if nx == nil {
|
||||
nxval, err := r.client.SetNX(ctx, req.ResourceID, req.LockOwner, time.Second*time.Duration(req.ExpiryInSeconds))
|
||||
if nxval == nil {
|
||||
return &lock.TryLockResponse{}, fmt.Errorf("[standaloneRedisLock]: SetNX returned nil.ResourceID: %s", req.ResourceID)
|
||||
}
|
||||
// 2. check error
|
||||
err := nx.Err()
|
||||
if err != nil {
|
||||
return &lock.TryLockResponse{}, err
|
||||
}
|
||||
|
||||
return &lock.TryLockResponse{
|
||||
Success: nx.Val(),
|
||||
Success: *nxval,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Try to release a redis lock.
|
||||
func (r *StandaloneRedisLock) Unlock(ctx context.Context, req *lock.UnlockRequest) (*lock.UnlockResponse, error) {
|
||||
// 1. delegate to client.eval lua script
|
||||
eval := r.client.Eval(ctx, unlockScript, []string{req.ResourceID}, req.LockOwner)
|
||||
evalInt, parseErr, err := r.client.EvalInt(ctx, unlockScript, []string{req.ResourceID}, req.LockOwner)
|
||||
// 2. check error
|
||||
if eval == nil {
|
||||
if evalInt == nil {
|
||||
return newInternalErrorUnlockResponse(), fmt.Errorf("[standaloneRedisLock]: Eval unlock script returned nil.ResourceID: %s", req.ResourceID)
|
||||
}
|
||||
err := eval.Err()
|
||||
if err != nil {
|
||||
return newInternalErrorUnlockResponse(), err
|
||||
}
|
||||
// 3. parse result
|
||||
i, err := eval.Int()
|
||||
i := *evalInt
|
||||
status := lock.InternalError
|
||||
if err != nil {
|
||||
if parseErr != nil {
|
||||
return &lock.UnlockResponse{
|
||||
Status: status,
|
||||
}, err
|
||||
|
|
@ -194,7 +187,9 @@ func (r *StandaloneRedisLock) Close() error {
|
|||
r.cancel()
|
||||
}
|
||||
if r.client != nil {
|
||||
return r.client.Close()
|
||||
closeErr := r.client.Close()
|
||||
r.client = nil
|
||||
return closeErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,8 +20,6 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
|
||||
rediscomponent "github.com/dapr/components-contrib/internal/component/redis"
|
||||
"github.com/dapr/components-contrib/pubsub"
|
||||
"github.com/dapr/kit/logger"
|
||||
|
|
@ -45,7 +43,7 @@ const (
|
|||
// on the mechanics of Redis Streams.
|
||||
type redisStreams struct {
|
||||
metadata metadata
|
||||
client redis.UniversalClient
|
||||
client rediscomponent.RedisClient
|
||||
clientSettings *rediscomponent.Settings
|
||||
logger logger.Logger
|
||||
|
||||
|
|
@ -144,7 +142,7 @@ func (r *redisStreams) Init(metadata pubsub.Metadata) error {
|
|||
|
||||
r.ctx, r.cancel = context.WithCancel(context.Background())
|
||||
|
||||
if _, err = r.client.Ping(r.ctx).Result(); err != nil {
|
||||
if _, err = r.client.PingResult(r.ctx); err != nil {
|
||||
return fmt.Errorf("redis streams: error connecting to redis at %s: %s", r.clientSettings.Host, err)
|
||||
}
|
||||
r.queue = make(chan redisMessageWrapper, int(r.metadata.queueDepth))
|
||||
|
|
@ -157,11 +155,7 @@ func (r *redisStreams) Init(metadata pubsub.Metadata) error {
|
|||
}
|
||||
|
||||
func (r *redisStreams) Publish(ctx context.Context, req *pubsub.PublishRequest) error {
|
||||
_, err := r.client.XAdd(ctx, &redis.XAddArgs{
|
||||
Stream: req.Topic,
|
||||
MaxLenApprox: r.metadata.maxLenApprox,
|
||||
Values: map[string]interface{}{"data": req.Data},
|
||||
}).Result()
|
||||
_, err := r.client.XAdd(ctx, req.Topic, r.metadata.maxLenApprox, map[string]interface{}{"data": req.Data})
if err != nil {
return fmt.Errorf("redis streams: error from publish: %s", err)
}

@@ -170,7 +164,7 @@ func (r *redisStreams) Publish(ctx context.Context, req *pubsub.PublishRequest)
}

func (r *redisStreams) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
err := r.client.XGroupCreateMkStream(ctx, req.Topic, r.metadata.consumerID, "0").Err()
err := r.client.XGroupCreateMkStream(ctx, req.Topic, r.metadata.consumerID, "0")
// Ignore BUSYGROUP errors
if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
r.logger.Errorf("redis streams: %s", err)

@@ -186,14 +180,14 @@ func (r *redisStreams) Subscribe(ctx context.Context, req pubsub.SubscribeReques
// enqueueMessages is a shared function that funnels new messages (via polling)
// and redelivered messages (via reclaiming) to a channel where workers can
// pick them up for processing.
func (r *redisStreams) enqueueMessages(ctx context.Context, stream string, handler pubsub.Handler, msgs []redis.XMessage) {
func (r *redisStreams) enqueueMessages(ctx context.Context, stream string, handler pubsub.Handler, msgs []rediscomponent.RedisXMessage) {
for _, msg := range msgs {
rmsg := createRedisMessageWrapper(ctx, stream, handler, msg)

select {
// Might block if the queue is full so we need the ctx.Done below.
case r.queue <- rmsg:

// Noop
// Handle cancelation
case <-ctx.Done():
return

@@ -203,7 +197,7 @@ func (r *redisStreams) enqueueMessages(ctx context.Context, stream string, handl

// createRedisMessageWrapper encapsulates the Redis message, message identifier, and handler
// in `redisMessage` for processing.
func createRedisMessageWrapper(ctx context.Context, stream string, handler pubsub.Handler, msg redis.XMessage) redisMessageWrapper {
func createRedisMessageWrapper(ctx context.Context, stream string, handler pubsub.Handler, msg rediscomponent.RedisXMessage) redisMessageWrapper {
var data []byte
if dataValue, exists := msg.Values["data"]; exists && dataValue != nil {
switch v := dataValue.(type) {

@@ -259,7 +253,7 @@ func (r *redisStreams) processMessage(msg redisMessageWrapper) error {
}

// Use the background context in case subscriptionCtx is already closed
if err := r.client.XAck(context.Background(), msg.message.Topic, r.metadata.consumerID, msg.messageID).Err(); err != nil {
if err := r.client.XAck(context.Background(), msg.message.Topic, r.metadata.consumerID, msg.messageID); err != nil {
r.logger.Errorf("Error acknowledging Redis message %s: %v", msg.messageID, err)

return err

@@ -278,15 +272,9 @@ func (r *redisStreams) pollNewMessagesLoop(ctx context.Context, stream string, h
}

// Read messages
streams, err := r.client.XReadGroup(ctx, &redis.XReadGroupArgs{
Group: r.metadata.consumerID,
Consumer: r.metadata.consumerID,
Streams: []string{stream, ">"},
Count: int64(r.metadata.queueDepth),
Block: time.Duration(r.clientSettings.ReadTimeout),
}).Result()
streams, err := r.client.XReadGroupResult(ctx, r.metadata.consumerID, r.metadata.consumerID, []string{stream, ">"}, int64(r.metadata.queueDepth), time.Duration(r.clientSettings.ReadTimeout))
if err != nil {
if !errors.Is(err, redis.Nil) && err != context.Canceled {
if !errors.Is(err, r.client.GetNilValueError()) && err != context.Canceled {
r.logger.Errorf("redis streams: error reading from stream %s: %s", stream, err)
}
continue

@@ -329,14 +317,14 @@ func (r *redisStreams) reclaimPendingMessagesLoop(ctx context.Context, stream st
func (r *redisStreams) reclaimPendingMessages(ctx context.Context, stream string, handler pubsub.Handler) {
for {
// Retrieve pending messages for this stream and consumer
pendingResult, err := r.client.XPendingExt(ctx, &redis.XPendingExtArgs{
Stream: stream,
Group: r.metadata.consumerID,
Start: "-",
End: "+",
Count: int64(r.metadata.queueDepth),
}).Result()
if err != nil && !errors.Is(err, redis.Nil) {
pendingResult, err := r.client.XPendingExtResult(ctx,
stream,
r.metadata.consumerID,
"-",
"+",
int64(r.metadata.queueDepth),
)
if err != nil && !errors.Is(err, r.client.GetNilValueError()) {
r.logger.Errorf("error retrieving pending Redis messages: %v", err)

break

@@ -356,14 +344,14 @@ func (r *redisStreams) reclaimPendingMessages(ctx context.Context, stream string
}

// Attempt to claim the messages for the filtered IDs
claimResult, err := r.client.XClaim(ctx, &redis.XClaimArgs{
Stream: stream,
Group: r.metadata.consumerID,
Consumer: r.metadata.consumerID,
MinIdle: r.metadata.processingTimeout,
Messages: msgIDs,
}).Result()
if err != nil && !errors.Is(err, redis.Nil) {
claimResult, err := r.client.XClaimResult(ctx,
stream,
r.metadata.consumerID,
r.metadata.consumerID,
r.metadata.processingTimeout,
msgIDs,
)
if err != nil && !errors.Is(err, r.client.GetNilValueError()) {
r.logger.Errorf("error claiming pending Redis messages: %v", err)

break

@@ -375,7 +363,7 @@ func (r *redisStreams) reclaimPendingMessages(ctx context.Context, stream string
// If the Redis nil error is returned, it means somes message in the pending
// state no longer exist. We need to acknowledge these messages to
// remove them from the pending list.
if errors.Is(err, redis.Nil) {
if errors.Is(err, r.client.GetNilValueError()) {
// Build a set of message IDs that were not returned
// that potentially no longer exist.
expectedMsgIDs := make(map[string]struct{}, len(msgIDs))

@@ -396,23 +384,23 @@ func (r *redisStreams) reclaimPendingMessages(ctx context.Context, stream string
func (r *redisStreams) removeMessagesThatNoLongerExistFromPending(ctx context.Context, stream string, messageIDs map[string]struct{}, handler pubsub.Handler) {
// Check each message ID individually.
for pendingID := range messageIDs {
claimResultSingleMsg, err := r.client.XClaim(ctx, &redis.XClaimArgs{
Stream: stream,
Group: r.metadata.consumerID,
Consumer: r.metadata.consumerID,
MinIdle: 0,
Messages: []string{pendingID},
}).Result()
if err != nil && !errors.Is(err, redis.Nil) {
claimResultSingleMsg, err := r.client.XClaimResult(ctx,
stream,
r.metadata.consumerID,
r.metadata.consumerID,
0,
[]string{pendingID},
)
if err != nil && !errors.Is(err, r.client.GetNilValueError()) {
r.logger.Errorf("error claiming pending Redis message %s: %v", pendingID, err)

continue
}

// Ack the message to remove it from the pending list.
if errors.Is(err, redis.Nil) {
if errors.Is(err, r.client.GetNilValueError()) {
// Use the background context in case subscriptionCtx is already closed
if err = r.client.XAck(context.Background(), stream, r.metadata.consumerID, pendingID).Err(); err != nil {
if err = r.client.XAck(context.Background(), stream, r.metadata.consumerID, pendingID); err != nil {
r.logger.Errorf("error acknowledging Redis message %s after failed claim for %s: %v", pendingID, stream, err)
}
} else {

@@ -423,8 +411,13 @@ func (r *redisStreams) removeMessagesThatNoLongerExistFromPending(ctx context.Co
}

func (r *redisStreams) Close() error {
r.cancel()
if r.cancel != nil {
r.cancel()
}

if r.client == nil {
return nil
}
return r.client.Close()
}

@@ -433,7 +426,7 @@ func (r *redisStreams) Features() []pubsub.Feature {
}

func (r *redisStreams) Ping() error {
if _, err := r.client.Ping(context.Background()).Result(); err != nil {
if _, err := r.client.PingResult(context.Background()); err != nil {
return fmt.Errorf("redis pubsub: error connecting to redis at %s: %s", r.clientSettings.Host, err)
}
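Note on the refactor above: the pubsub component no longer calls `redis.UniversalClient` directly but goes through the shared `rediscomponent.RedisClient` abstraction, so the concrete go-redis client can be swapped behind it. The sketch below is not the actual interface from `internal/component/redis`; it is a minimal reconstruction inferred from the call sites in this diff, and the exact method signatures and helper types are assumptions.

```go
// Hypothetical reconstruction, for illustration only: a client abstraction
// shaped like the calls made in the pubsub code above. The real interface
// lives in internal/component/redis and may differ.
package redisclient

import (
	"context"
	"time"
)

// RedisXMessage mirrors one stream entry: its ID plus field/value pairs.
type RedisXMessage struct {
	ID     string
	Values map[string]interface{}
}

// RedisXStream groups the messages read from a single stream.
type RedisXStream struct {
	Stream   string
	Messages []RedisXMessage
}

// RedisXPendingExt describes one pending entry; only the ID is used above.
type RedisXPendingExt struct {
	ID string
}

// RedisClient is the driver-agnostic surface the pubsub code relies on.
type RedisClient interface {
	PingResult(ctx context.Context) (string, error)
	XAdd(ctx context.Context, stream string, maxLenApprox int64, values map[string]interface{}) (string, error)
	XGroupCreateMkStream(ctx context.Context, stream, group, start string) error
	XReadGroupResult(ctx context.Context, group, consumer string, streams []string, count int64, block time.Duration) ([]RedisXStream, error)
	XPendingExtResult(ctx context.Context, stream, group, start, end string, count int64) ([]RedisXPendingExt, error)
	XClaimResult(ctx context.Context, stream, group, consumer string, minIdle time.Duration, ids []string) ([]RedisXMessage, error)
	XAck(ctx context.Context, stream, group, id string) error
	GetNilValueError() error // the driver's "no rows" sentinel, e.g. redis.Nil
	Close() error
}
```

Error comparisons consequently go through `GetNilValueError()` instead of referencing `redis.Nil` from a specific driver version, which is what the `errors.Is` changes in the hunks above implement.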
@@ -20,12 +20,13 @@ import (
"sync"
"testing"

"github.com/go-redis/redis/v8"
"github.com/stretchr/testify/assert"

mdata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/kit/logger"

internalredis "github.com/dapr/components-contrib/internal/component/redis"
)

func getFakeProperties() map[string]string {

@@ -108,9 +109,9 @@ func TestProcessStreams(t *testing.T) {
assert.Equal(t, 3, messageCount)
}

func generateRedisStreamTestData(topicCount, messageCount int, data string) []redis.XMessage {
generateXMessage := func(id int) redis.XMessage {
return redis.XMessage{
func generateRedisStreamTestData(topicCount, messageCount int, data string) []internalredis.RedisXMessage {
generateXMessage := func(id int) internalredis.RedisXMessage {
return internalredis.RedisXMessage{
ID: fmt.Sprintf("%d", id),
Values: map[string]interface{}{
"data": data,

@@ -118,7 +119,7 @@ func generateRedisStreamTestData(topicCount, messageCount int, data string) []re
}
}

xmessageArray := make([]redis.XMessage, messageCount)
xmessageArray := make([]internalredis.RedisXMessage, messageCount)
for i := range xmessageArray {
xmessageArray[i] = generateXMessage(i)
}
@@ -21,7 +21,6 @@ import (
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"time"

@@ -36,6 +35,7 @@ import (
contribmeta "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/state/query"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
)

@@ -77,7 +77,6 @@ type CosmosItem struct {

const (
metadataPartitionKey = "partitionKey"
metadataTTLKey       = "ttlInSeconds"
defaultTimeout       = 20 * time.Second
statusNotFound       = "NotFound"
)

@@ -481,7 +480,7 @@ func createUpsertItem(contentType string, req state.SetRequest, partitionKey str
isBinary = false
}

ttl, err := parseTTL(req.Metadata)
ttl, err := stateutils.ParseTTL(req.Metadata)
if err != nil {
return CosmosItem{}, fmt.Errorf("error parsing TTL from metadata: %s", err)
}

@@ -534,20 +533,6 @@ func populatePartitionMetadata(key string, requestMetadata map[string]string) st
return key
}

func parseTTL(requestMetadata map[string]string) (*int, error) {
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, err
}
i := int(parsedVal)

return &i, nil
}

return nil, nil
}

func isNotFoundError(err error) bool {
if err == nil {
return false
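The per-store `parseTTL` helpers removed here and in the following files were copies of the same logic; the commit centralizes them as `stateutils.ParseTTL` in `state/utils`. The sketch below is reconstructed from the deleted copies and the new call sites, not copied from `state/utils`, so the exact implementation there may differ slightly.

```go
// Sketch of the shared helper, reconstructed from the duplicated parseTTL
// functions deleted in this commit; assume it lives in state/utils.
package utils

import "strconv"

// MetadataTTLKey is the request metadata key carrying the TTL in seconds.
const MetadataTTLKey = "ttlInSeconds"

// ParseTTL returns the TTL in seconds when the metadata contains one,
// nil when the key is absent or empty, and an error when it is not an integer.
func ParseTTL(requestMetadata map[string]string) (*int, error) {
	if val, found := requestMetadata[MetadataTTLKey]; found && val != "" {
		parsedVal, err := strconv.ParseInt(val, 10, 0)
		if err != nil {
			return nil, err
		}
		ttl := int(parsedVal)
		return &ttl, nil
	}
	return nil, nil
}
```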
@ -21,6 +21,7 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/dapr/components-contrib/state"
|
||||
stateutils "github.com/dapr/components-contrib/state/utils"
|
||||
)
|
||||
|
||||
type widget struct {
|
||||
|
|
@ -284,7 +285,7 @@ func TestCreateCosmosItemWithTTL(t *testing.T) {
|
|||
Key: "testKey",
|
||||
Value: value,
|
||||
Metadata: map[string]string{
|
||||
metadataTTLKey: strconv.Itoa(ttl),
|
||||
stateutils.MetadataTTLKey: strconv.Itoa(ttl),
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -316,7 +317,7 @@ func TestCreateCosmosItemWithTTL(t *testing.T) {
|
|||
Key: "testKey",
|
||||
Value: value,
|
||||
Metadata: map[string]string{
|
||||
metadataTTLKey: strconv.Itoa(ttl),
|
||||
stateutils.MetadataTTLKey: strconv.Itoa(ttl),
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -347,7 +348,7 @@ func TestCreateCosmosItemWithTTL(t *testing.T) {
|
|||
Key: "testKey",
|
||||
Value: value,
|
||||
Metadata: map[string]string{
|
||||
metadataTTLKey: "notattl",
|
||||
stateutils.MetadataTTLKey: "notattl",
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -24,6 +24,7 @@ import (

"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
)

@@ -279,7 +280,7 @@ func (c *Cassandra) Set(ctx context.Context, req *state.SetRequest) error {
session = sess
}

ttl, err := parseTTL(req.Metadata)
ttl, err := stateutils.ParseTTL(req.Metadata)
if err != nil {
return fmt.Errorf("error parsing TTL from Metadata: %s", err)
}

@@ -302,20 +303,6 @@ func (c *Cassandra) createSession(consistency gocql.Consistency) (*gocql.Session
return session, nil
}

func parseTTL(requestMetadata map[string]string) (*int, error) {
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, err
}
parsedInt := int(parsedVal)

return &parsedInt, nil
}

return nil, nil
}

func (c *Cassandra) GetComponentMetadata() map[string]string {
metadataStruct := cassandraMetadata{}
metadataInfo := map[string]string{}
@ -14,7 +14,6 @@ limitations under the License.
|
|||
package cassandra
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
|
@ -111,35 +110,3 @@ func TestGetCassandraMetadata(t *testing.T) {
|
|||
assert.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseTTL(t *testing.T) {
|
||||
t.Run("TTL Not an integer", func(t *testing.T) {
|
||||
ttlInSeconds := "not an integer"
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"ttlInSeconds": ttlInSeconds,
|
||||
})
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
t.Run("TTL specified with wrong key", func(t *testing.T) {
|
||||
ttlInSeconds := 12345
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"expirationTime": strconv.Itoa(ttlInSeconds),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
t.Run("TTL is a number", func(t *testing.T) {
|
||||
ttlInSeconds := 12345
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *ttl, ttlInSeconds)
|
||||
})
|
||||
t.Run("TTL not set", func(t *testing.T) {
|
||||
ttl, err := parseTTL(map[string]string{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -93,7 +93,7 @@ func TestParseTTL(t *testing.T) {
},
})

assert.NotNil(t, err, "tll is not an integer")
assert.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL is a negative integer ends up translated to 0", func(t *testing.T) {
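Across these state stores the TTL is always supplied the same way, as request metadata on the set operation. A minimal usage sketch follows; the store variable, key and value are illustrative only, and it assumes an already initialized `state.Store` whose `Set` accepts a context, as the stores in this diff do.

```go
// Illustrative only: how a caller attaches a TTL when writing state.
// Assumes imports of "context" and "github.com/dapr/components-contrib/state".
func setWithTTL(ctx context.Context, store state.Store) error {
	return store.Set(ctx, &state.SetRequest{
		Key:   "order-1234",
		Value: map[string]any{"status": "pending"},
		Metadata: map[string]string{
			"ttlInSeconds": "300", // same key as stateutils.MetadataTTLKey
		},
	})
}
```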
@ -22,7 +22,6 @@ import (
|
|||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
@ -34,6 +33,7 @@ import (
|
|||
|
||||
"github.com/dapr/components-contrib/metadata"
|
||||
"github.com/dapr/components-contrib/state"
|
||||
stateutils "github.com/dapr/components-contrib/state/utils"
|
||||
"github.com/dapr/kit/logger"
|
||||
)
|
||||
|
||||
|
|
@ -278,17 +278,13 @@ func (r *StateStore) writeDocument(ctx context.Context, req *state.SetRequest) e
|
|||
}
|
||||
|
||||
func (r *StateStore) convertTTLtoExpiryTime(req *state.SetRequest, metadata map[string]string) error {
|
||||
ttl, ttlerr := parseTTL(req.Metadata)
|
||||
ttl, ttlerr := stateutils.ParseTTL(req.Metadata)
|
||||
if ttlerr != nil {
|
||||
return fmt.Errorf("error in parsing TTL %w", ttlerr)
|
||||
return fmt.Errorf("error parsing TTL: %w", ttlerr)
|
||||
}
|
||||
if ttl != nil {
|
||||
if *ttl == -1 {
|
||||
r.logger.Debugf("TTL is set to -1; this means: never expire. ")
|
||||
} else {
|
||||
metadata[expiryTimeMetaLabel] = time.Now().UTC().Add(time.Second * time.Duration(*ttl)).Format(isoDateTimeFormat)
|
||||
r.logger.Debugf("Set %s in meta properties for object to ", expiryTimeMetaLabel, metadata[expiryTimeMetaLabel])
|
||||
}
|
||||
metadata[expiryTimeMetaLabel] = time.Now().UTC().Add(time.Second * time.Duration(*ttl)).Format(isoDateTimeFormat)
|
||||
r.logger.Debugf("Set %s in meta properties for object to ", expiryTimeMetaLabel, metadata[expiryTimeMetaLabel])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -367,20 +363,6 @@ func getFileName(key string) string {
|
|||
return path.Join(pr[0], pr[1])
|
||||
}
|
||||
|
||||
func parseTTL(requestMetadata map[string]string) (*int, error) {
|
||||
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
|
||||
parsedVal, err := strconv.ParseInt(val, 10, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in parsing ttl metadata : %w", err)
|
||||
}
|
||||
parsedInt := int(parsedVal)
|
||||
|
||||
return &parsedInt, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
/**************** functions with OCI ObjectStorage Service interaction. */
|
||||
|
||||
func getNamespace(ctx context.Context, client objectstorage.ObjectStorageClient) (string, error) {
|
||||
|
|
|
|||
|
|
@ -17,7 +17,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
|
@ -394,36 +393,3 @@ func TestGetFilename(t *testing.T) {
|
|||
assert.Equal(t, "app-id-key", filename)
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseTTL(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("TTL Not an integer", func(t *testing.T) {
|
||||
ttlInSeconds := "not an integer"
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"ttlInSeconds": ttlInSeconds,
|
||||
})
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
t.Run("TTL specified with wrong key", func(t *testing.T) {
|
||||
ttlInSeconds := 12345
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"expirationTime": strconv.Itoa(ttlInSeconds),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
t.Run("TTL is a number", func(t *testing.T) {
|
||||
ttlInSeconds := 12345
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *ttl, ttlInSeconds)
|
||||
})
|
||||
t.Run("TTL not set", func(t *testing.T) {
|
||||
ttl, err := parseTTL(map[string]string{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
|
|||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
|
@ -658,43 +657,6 @@ func setItemWithNoKey(t *testing.T, ods *OracleDatabase) {
|
|||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestParseTTL(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("TTL Not an integer", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ttlInSeconds := "not an integer"
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"ttlInSeconds": ttlInSeconds,
|
||||
})
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
t.Run("TTL specified with wrong key", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ttlInSeconds := 12345
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"expirationTime": strconv.Itoa(ttlInSeconds),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
t.Run("TTL is a number", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ttlInSeconds := 12345
|
||||
ttl, err := parseTTL(map[string]string{
|
||||
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *ttl, ttlInSeconds)
|
||||
})
|
||||
t.Run("TTL not set", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ttl, err := parseTTL(map[string]string{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ttl)
|
||||
})
|
||||
}
|
||||
|
||||
func testSetItemWithInvalidTTL(t *testing.T, ods *OracleDatabase) {
|
||||
setReq := &state.SetRequest{
|
||||
Key: randomKey(),
|
||||
|
|
|
|||
|
|
@ -20,13 +20,12 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/dapr/components-contrib/metadata"
|
||||
"github.com/dapr/components-contrib/state"
|
||||
"github.com/dapr/components-contrib/state/utils"
|
||||
stateutils "github.com/dapr/components-contrib/state/utils"
|
||||
"github.com/dapr/kit/logger"
|
||||
|
||||
// Blank import for the underlying Oracle Database driver.
|
||||
|
|
@ -36,7 +35,6 @@ import (
|
|||
const (
|
||||
connectionStringKey = "connectionString"
|
||||
oracleWalletLocationKey = "oracleWalletLocation"
|
||||
metadataTTLKey = "ttlInSeconds"
|
||||
errMissingConnectionString = "missing connection string"
|
||||
tableName = "state"
|
||||
)
|
||||
|
|
@ -115,20 +113,6 @@ func (o *oracleDatabaseAccess) Init(metadata state.Metadata) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func parseTTL(requestMetadata map[string]string) (*int, error) {
|
||||
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
|
||||
parsedVal, err := strconv.ParseInt(val, 10, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in parsing ttl metadata : %w", err)
|
||||
}
|
||||
parsedInt := int(parsedVal)
|
||||
|
||||
return &parsedInt, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Set makes an insert or update to the database.
|
||||
func (o *oracleDatabaseAccess) Set(ctx context.Context, req *state.SetRequest) error {
|
||||
o.logger.Debug("Setting state value in OracleDatabase")
|
||||
|
|
@ -149,19 +133,12 @@ func (o *oracleDatabaseAccess) Set(ctx context.Context, req *state.SetRequest) e
|
|||
return fmt.Errorf("when FirstWrite is to be enforced, a value must be provided for the ETag")
|
||||
}
|
||||
var ttlSeconds int
|
||||
ttl, ttlerr := parseTTL(req.Metadata)
|
||||
ttl, ttlerr := stateutils.ParseTTL(req.Metadata)
|
||||
if ttlerr != nil {
|
||||
return fmt.Errorf("error in parsing TTL %w", ttlerr)
|
||||
return fmt.Errorf("error parsing TTL: %w", ttlerr)
|
||||
}
|
||||
if ttl != nil {
|
||||
if *ttl == -1 {
|
||||
o.logger.Debugf("TTL is set to -1; this means: never expire. ")
|
||||
} else {
|
||||
if *ttl < -1 {
|
||||
return fmt.Errorf("incorrect value for %s %d", metadataTTLKey, *ttl)
|
||||
}
|
||||
ttlSeconds = *ttl
|
||||
}
|
||||
ttlSeconds = *ttl
|
||||
}
|
||||
requestValue := req.Value
|
||||
byteArray, isBinary := req.Value.([]uint8)
|
||||
|
|
@ -172,7 +149,7 @@ func (o *oracleDatabaseAccess) Set(ctx context.Context, req *state.SetRequest) e
|
|||
}
|
||||
|
||||
// Convert to json string.
|
||||
bt, _ := utils.Marshal(requestValue, json.Marshal)
|
||||
bt, _ := stateutils.Marshal(requestValue, json.Marshal)
|
||||
value := string(bt)
|
||||
|
||||
var result sql.Result
|
||||
|
|
|
|||
|
|
@@ -15,6 +15,7 @@ package postgresql

import (
"context"
"database/sql"

"github.com/dapr/components-contrib/state"
)

@@ -31,3 +32,12 @@ type dbAccess interface {
Query(ctx context.Context, req *state.QueryRequest) (*state.QueryResponse, error)
Close() error // io.Closer
}

// Interface that contains methods for querying.
// Applies to both *sql.DB and *sql.Tx
type dbquerier interface {
Exec(query string, args ...any) (sql.Result, error)
ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
QueryRow(query string, args ...any) *sql.Row
QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}
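The `dbquerier` interface lets the same data-access code run either directly against the connection pool or inside a transaction: in Go's standard `database/sql` package both `*sql.DB` and `*sql.Tx` already expose `Exec`, `ExecContext`, `QueryRow` and `QueryRowContext` with exactly these signatures, so both satisfy it without an adapter. A small sketch under assumptions: the helper name and the `kv` table are made up for the example and are not part of this commit.

```go
// Illustrative sketch: the same helper works on a pool or a transaction.
// "kv" is a hypothetical two-column text table, not the component's state table.
func upsertValue(ctx context.Context, q dbquerier, key, value string) error {
	_, err := q.ExecContext(ctx,
		`INSERT INTO kv (key, value) VALUES ($1, $2)
		 ON CONFLICT (key) DO UPDATE SET value = $2`,
		key, value,
	)
	return err
}

// Both calls compile, because *sql.DB and *sql.Tx implement dbquerier:
//
//   err = upsertValue(ctx, db, "k", "v")   // db is a *sql.DB
//
//   tx, _ := db.BeginTx(ctx, nil)
//   err = upsertValue(ctx, tx, "k", "v")   // tx is a *sql.Tx
//   _ = tx.Commit()
```

This is the mechanism the later `doSet` and `doDelete` changes rely on: `Set`, `BulkSet`, `BulkDelete` and `ExecuteMulti` all funnel into the same helpers, passing either the pool or an open transaction.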
@ -0,0 +1,227 @@
|
|||
/*
|
||||
Copyright 2021 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package postgresql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dapr/kit/logger"
|
||||
)
|
||||
|
||||
// Performs migrations for the database schema
|
||||
type migrations struct {
|
||||
Logger logger.Logger
|
||||
Conn *sql.DB
|
||||
StateTableName string
|
||||
MetadataTableName string
|
||||
}
|
||||
|
||||
// Perform the required migrations
|
||||
func (m *migrations) Perform(ctx context.Context) error {
|
||||
// Use an advisory lock (with an arbitrary number) to ensure that no one else is performing migrations at the same time
|
||||
// This is the only way to also ensure we are not running multiple "CREATE TABLE IF NOT EXISTS" at the exact same time
|
||||
// See: https://www.postgresql.org/message-id/CA+TgmoZAdYVtwBfp1FL2sMZbiHCWT4UPrzRLNnX1Nb30Ku3-gg@mail.gmail.com
|
||||
const lockID = 42
|
||||
|
||||
// Long timeout here as this query may block
|
||||
queryCtx, cancel := context.WithTimeout(ctx, time.Minute)
|
||||
_, err := m.Conn.ExecContext(queryCtx, "SELECT pg_advisory_lock($1)", lockID)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return fmt.Errorf("faild to acquire advisory lock: %w", err)
|
||||
}
|
||||
|
||||
// Release the lock
|
||||
defer func() {
|
||||
queryCtx, cancel = context.WithTimeout(ctx, time.Minute)
|
||||
_, err = m.Conn.ExecContext(queryCtx, "SELECT pg_advisory_unlock($1)", lockID)
|
||||
cancel()
|
||||
if err != nil {
|
||||
// Panicking here, as this forcibly closes the session and thus ensures we are not leaving locks hanging around
|
||||
m.Logger.Fatalf("Failed to release advisory lock: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Check if the metadata table exists, which we also use to store the migration level
|
||||
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
exists, _, _, err := m.tableExists(queryCtx, m.MetadataTableName)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the table doesn't exist, create it
|
||||
if !exists {
|
||||
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
err = m.createMetadataTable(queryCtx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Select the migration level
|
||||
var (
|
||||
migrationLevelStr string
|
||||
migrationLevel int
|
||||
)
|
||||
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
err = m.Conn.
|
||||
QueryRowContext(queryCtx,
|
||||
fmt.Sprintf(`SELECT value FROM %s WHERE key = 'migrations'`, m.MetadataTableName),
|
||||
).Scan(&migrationLevelStr)
|
||||
cancel()
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
// If there's no row...
|
||||
migrationLevel = 0
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("failed to read migration level: %w", err)
|
||||
} else {
|
||||
migrationLevel, err = strconv.Atoi(migrationLevelStr)
|
||||
if err != nil || migrationLevel < 0 {
|
||||
return fmt.Errorf("invalid migration level found in metadata table: %s", migrationLevelStr)
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the migrations
|
||||
for i := migrationLevel; i < len(allMigrations); i++ {
|
||||
m.Logger.Infof("Performing migration %d", i)
|
||||
err = allMigrations[i](ctx, m)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to perform migration %d: %w", i, err)
|
||||
}
|
||||
|
||||
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
_, err = m.Conn.ExecContext(queryCtx,
|
||||
fmt.Sprintf(`INSERT INTO %s (key, value) VALUES ('migrations', $1) ON CONFLICT (key) DO UPDATE SET value = $1`, m.MetadataTableName),
|
||||
strconv.Itoa(i+1),
|
||||
)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update migration level in metadata table: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m migrations) createMetadataTable(ctx context.Context) error {
|
||||
m.Logger.Infof("Creating metadata table '%s'", m.MetadataTableName)
|
||||
// Add an "IF NOT EXISTS" in case another Dapr sidecar is creating the same table at the same time
|
||||
// In the next step we'll acquire a lock so there won't be issues with concurrency
|
||||
_, err := m.Conn.Exec(fmt.Sprintf(
|
||||
`CREATE TABLE IF NOT EXISTS %s (
|
||||
key text NOT NULL PRIMARY KEY,
|
||||
value text NOT NULL
|
||||
)`,
|
||||
m.MetadataTableName,
|
||||
))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create metadata table: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the table exists, returns true and the name of the table and schema
|
||||
func (m migrations) tableExists(ctx context.Context, tableName string) (exists bool, schema string, table string, err error) {
|
||||
table, schema, err = m.tableSchemaName(tableName)
|
||||
if err != nil {
|
||||
return false, "", "", err
|
||||
}
|
||||
|
||||
if schema == "" {
|
||||
err = m.Conn.
|
||||
QueryRowContext(
|
||||
ctx,
|
||||
`SELECT table_name, table_schema
|
||||
FROM information_schema.tables
|
||||
WHERE table_name = $1`,
|
||||
table,
|
||||
).
|
||||
Scan(&table, &schema)
|
||||
} else {
|
||||
err = m.Conn.
|
||||
QueryRowContext(
|
||||
ctx,
|
||||
`SELECT table_name, table_schema
|
||||
FROM information_schema.tables
|
||||
WHERE table_schema = $1 AND table_name = $2`,
|
||||
schema, table,
|
||||
).
|
||||
Scan(&table, &schema)
|
||||
}
|
||||
|
||||
if err != nil && errors.Is(err, sql.ErrNoRows) {
|
||||
return false, "", "", nil
|
||||
} else if err != nil {
|
||||
return false, "", "", fmt.Errorf("failed to check if table '%s' exists: %w", tableName, err)
|
||||
}
|
||||
return true, schema, table, nil
|
||||
}
|
||||
|
||||
// If the table name includes a schema (e.g. `schema.table`, returns the two parts separately)
|
||||
func (m migrations) tableSchemaName(tableName string) (table string, schema string, err error) {
|
||||
parts := strings.Split(tableName, ".")
|
||||
switch len(parts) {
|
||||
case 1:
|
||||
return parts[0], "", nil
|
||||
case 2:
|
||||
return parts[1], parts[0], nil
|
||||
default:
|
||||
return "", "", errors.New("invalid table name: must be in the format 'table' or 'schema.table'")
|
||||
}
|
||||
}
|
||||
|
||||
var allMigrations = [2]func(ctx context.Context, m *migrations) error{
|
||||
// Migration 0: create the state table
|
||||
func(ctx context.Context, m *migrations) error {
|
||||
// We need to add an "IF NOT EXISTS" because we may be migrating from when we did not use a metadata table
|
||||
m.Logger.Infof("Creating state table '%s'", m.StateTableName)
|
||||
_, err := m.Conn.Exec(
|
||||
fmt.Sprintf(
|
||||
`CREATE TABLE IF NOT EXISTS %s (
|
||||
key text NOT NULL PRIMARY KEY,
|
||||
value jsonb NOT NULL,
|
||||
isbinary boolean NOT NULL,
|
||||
insertdate TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updatedate TIMESTAMP WITH TIME ZONE NULL
|
||||
)`,
|
||||
m.StateTableName,
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create state table: %w", err)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
||||
// Migration 1: add the "expiredate" column
|
||||
func(ctx context.Context, m *migrations) error {
|
||||
m.Logger.Infof("Adding expiredate column to state table '%s'", m.StateTableName)
|
||||
_, err := m.Conn.Exec(fmt.Sprintf(
|
||||
`ALTER TABLE %s ADD expiredate TIMESTAMP WITH TIME ZONE`,
|
||||
m.StateTableName,
|
||||
))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update state table: %w", err)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
|
@ -26,34 +26,38 @@ import (
|
|||
"github.com/dapr/components-contrib/metadata"
|
||||
"github.com/dapr/components-contrib/state"
|
||||
"github.com/dapr/components-contrib/state/query"
|
||||
"github.com/dapr/components-contrib/state/utils"
|
||||
stateutils "github.com/dapr/components-contrib/state/utils"
|
||||
"github.com/dapr/kit/logger"
|
||||
"github.com/dapr/kit/ptr"
|
||||
|
||||
// Blank import for the underlying PostgreSQL driver.
|
||||
// Blank import for the underlying Postgres driver.
|
||||
_ "github.com/jackc/pgx/v5/stdlib"
|
||||
)
|
||||
|
||||
const (
|
||||
connectionStringKey = "connectionString"
|
||||
errMissingConnectionString = "missing connection string"
|
||||
defaultTableName = "state"
|
||||
defaultTableName = "state"
|
||||
defaultMetadataTableName = "dapr_metadata"
|
||||
cleanupIntervalKey = "cleanupIntervalInSeconds"
|
||||
defaultCleanupInternal = 3600 // In seconds = 1 hour
|
||||
)
|
||||
|
||||
// postgresDBAccess implements dbaccess.
|
||||
type postgresDBAccess struct {
|
||||
logger logger.Logger
|
||||
metadata postgresMetadataStruct
|
||||
db *sql.DB
|
||||
connectionString string
|
||||
tableName string
|
||||
var errMissingConnectionString = errors.New("missing connection string")
|
||||
|
||||
// PostgresDBAccess implements dbaccess.
|
||||
type PostgresDBAccess struct {
|
||||
logger logger.Logger
|
||||
metadata postgresMetadataStruct
|
||||
cleanupInterval *time.Duration
|
||||
db *sql.DB
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// newPostgresDBAccess creates a new instance of postgresAccess.
|
||||
func newPostgresDBAccess(logger logger.Logger) *postgresDBAccess {
|
||||
logger.Debug("Instantiating new PostgreSQL state store")
|
||||
func newPostgresDBAccess(logger logger.Logger) *PostgresDBAccess {
|
||||
logger.Debug("Instantiating new Postgres state store")
|
||||
|
||||
return &postgresDBAccess{
|
||||
return &PostgresDBAccess{
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
|
@ -61,14 +65,66 @@ func newPostgresDBAccess(logger logger.Logger) *postgresDBAccess {
|
|||
type postgresMetadataStruct struct {
|
||||
ConnectionString string
|
||||
ConnectionMaxIdleTime time.Duration
|
||||
TableName string
|
||||
TableName string // Could be in the format "schema.table" or just "table"
|
||||
MetadataTableName string // Could be in the format "schema.table" or just "table"
|
||||
}
|
||||
|
||||
// Init sets up PostgreSQL connection and ensures that the state table exists.
|
||||
func (p *postgresDBAccess) Init(meta state.Metadata) error {
|
||||
p.logger.Debug("Initializing PostgreSQL state store")
|
||||
// Init sets up Postgres connection and ensures that the state table exists.
|
||||
func (p *PostgresDBAccess) Init(meta state.Metadata) error {
|
||||
p.logger.Debug("Initializing Postgres state store")
|
||||
|
||||
p.ctx, p.cancel = context.WithCancel(context.Background())
|
||||
|
||||
err := p.ParseMetadata(meta)
|
||||
if err != nil {
|
||||
p.logger.Errorf("Failed to parse metadata: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
db, err := sql.Open("pgx", p.metadata.ConnectionString)
|
||||
if err != nil {
|
||||
p.logger.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
p.db = db
|
||||
|
||||
pingCtx, pingCancel := context.WithTimeout(p.ctx, 30*time.Second)
|
||||
pingErr := db.PingContext(pingCtx)
|
||||
pingCancel()
|
||||
if pingErr != nil {
|
||||
return pingErr
|
||||
}
|
||||
|
||||
p.db.SetConnMaxIdleTime(p.metadata.ConnectionMaxIdleTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
migrate := &migrations{
|
||||
Logger: p.logger,
|
||||
Conn: p.db,
|
||||
MetadataTableName: p.metadata.MetadataTableName,
|
||||
StateTableName: p.metadata.TableName,
|
||||
}
|
||||
err = migrate.Perform(p.ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.ScheduleCleanupExpiredData(p.ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PostgresDBAccess) GetDB() *sql.DB {
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *PostgresDBAccess) ParseMetadata(meta state.Metadata) error {
|
||||
m := postgresMetadataStruct{
|
||||
TableName: defaultTableName,
|
||||
TableName: defaultTableName,
|
||||
MetadataTableName: defaultMetadataTableName,
|
||||
}
|
||||
err := metadata.DecodeMetadata(meta.Properties, &m)
|
||||
if err != nil {
|
||||
|
|
@ -77,44 +133,33 @@ func (p *postgresDBAccess) Init(meta state.Metadata) error {
|
|||
p.metadata = m
|
||||
|
||||
if m.ConnectionString == "" {
|
||||
p.logger.Error("Missing postgreSQL connection string")
|
||||
|
||||
return errors.New(errMissingConnectionString)
|
||||
}
|
||||
p.connectionString = m.ConnectionString
|
||||
|
||||
db, err := sql.Open("pgx", p.connectionString)
|
||||
if err != nil {
|
||||
p.logger.Error(err)
|
||||
|
||||
return err
|
||||
return errMissingConnectionString
|
||||
}
|
||||
|
||||
p.db = db
|
||||
s, ok := meta.Properties[cleanupIntervalKey]
|
||||
if ok && s != "" {
|
||||
cleanupIntervalInSec, err := strconv.ParseInt(s, 10, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for '%s': %s", cleanupIntervalKey, s)
|
||||
}
|
||||
|
||||
pingErr := db.Ping()
|
||||
if pingErr != nil {
|
||||
return pingErr
|
||||
// Non-positive value from meta means disable auto cleanup.
|
||||
if cleanupIntervalInSec > 0 {
|
||||
p.cleanupInterval = ptr.Of(time.Duration(cleanupIntervalInSec) * time.Second)
|
||||
}
|
||||
} else {
|
||||
p.cleanupInterval = ptr.Of(defaultCleanupInternal * time.Second)
|
||||
}
|
||||
|
||||
p.db.SetConnMaxIdleTime(m.ConnectionMaxIdleTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.ensureStateTable(m.TableName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.tableName = m.TableName
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set makes an insert or update to the database.
|
||||
func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error {
|
||||
p.logger.Debug("Setting state value in PostgreSQL")
|
||||
func (p *PostgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error {
|
||||
return p.doSet(ctx, p.db, req)
|
||||
}
|
||||
|
||||
func (p *PostgresDBAccess) doSet(parentCtx context.Context, db dbquerier, req *state.SetRequest) error {
|
||||
err := state.CheckRequestOptions(req.Options)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -135,22 +180,47 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
|
|||
}
|
||||
|
||||
// Convert to json string
|
||||
bt, _ := utils.Marshal(v, json.Marshal)
|
||||
bt, _ := stateutils.Marshal(v, json.Marshal)
|
||||
value := string(bt)
|
||||
|
||||
// TTL
|
||||
var ttlSeconds int
|
||||
ttl, ttlerr := stateutils.ParseTTL(req.Metadata)
|
||||
if ttlerr != nil {
|
||||
return fmt.Errorf("error parsing TTL: %w", ttlerr)
|
||||
}
|
||||
if ttl != nil {
|
||||
ttlSeconds = *ttl
|
||||
}
|
||||
|
||||
var result sql.Result
|
||||
|
||||
// Sprintf is required for table name because sql.DB does not substitute parameters for table names.
|
||||
// Other parameters use sql.DB parameter substitution.
|
||||
if req.Options.Concurrency == state.FirstWrite && (req.ETag == nil || *req.ETag == "") {
|
||||
result, err = p.db.ExecContext(ctx, fmt.Sprintf(
|
||||
`INSERT INTO %s (key, value, isbinary) VALUES ($1, $2, $3);`,
|
||||
p.tableName), req.Key, value, isBinary)
|
||||
} else if req.ETag == nil || *req.ETag == "" {
|
||||
result, err = p.db.ExecContext(ctx, fmt.Sprintf(
|
||||
`INSERT INTO %s (key, value, isbinary) VALUES ($1, $2, $3)
|
||||
ON CONFLICT (key) DO UPDATE SET value = $2, isbinary = $3, updatedate = NOW();`,
|
||||
p.tableName), req.Key, value, isBinary)
|
||||
// Sprintf is required for table name because query.DB does not substitute parameters for table names.
|
||||
// Other parameters use query.DB parameter substitution.
|
||||
var (
|
||||
query string
|
||||
queryExpiredate string
|
||||
params []any
|
||||
)
|
||||
if req.ETag == nil || *req.ETag == "" {
|
||||
if req.Options.Concurrency == state.FirstWrite {
|
||||
query = `INSERT INTO %[1]s
|
||||
(key, value, isbinary, expiredate)
|
||||
VALUES
|
||||
($1, $2, $3, %[2]s)`
|
||||
} else {
|
||||
query = `INSERT INTO %[1]s
|
||||
(key, value, isbinary, expiredate)
|
||||
VALUES
|
||||
($1, $2, $3, %[2]s)
|
||||
ON CONFLICT (key)
|
||||
DO UPDATE SET
|
||||
value = $2,
|
||||
isbinary = $3,
|
||||
updatedate = CURRENT_TIMESTAMP,
|
||||
expiredate = %[2]s`
|
||||
}
|
||||
params = []any{req.Key, value, isBinary}
|
||||
} else {
|
||||
// Convert req.ETag to uint32 for postgres XID compatibility
|
||||
var etag64 uint64
|
||||
|
|
@ -158,20 +228,30 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
|
|||
if err != nil {
|
||||
return state.NewETagError(state.ETagInvalid, err)
|
||||
}
|
||||
etag := uint32(etag64)
|
||||
|
||||
// When an etag is provided do an update - no insert
|
||||
result, err = p.db.ExecContext(ctx, fmt.Sprintf(
|
||||
`UPDATE %s SET value = $1, isbinary = $2, updatedate = NOW()
|
||||
WHERE key = $3 AND xmin = $4;`,
|
||||
p.tableName), value, isBinary, req.Key, etag)
|
||||
query = `UPDATE %[1]s
|
||||
SET
|
||||
value = $1,
|
||||
isbinary = $2,
|
||||
updatedate = CURRENT_TIMESTAMP,
|
||||
expiredate = %[2]s
|
||||
WHERE
|
||||
key = $3
|
||||
AND xmin = $4`
|
||||
params = []any{value, isBinary, req.Key, uint32(etag64)}
|
||||
}
|
||||
|
||||
if ttlSeconds > 0 {
|
||||
queryExpiredate = "CURRENT_TIMESTAMP + interval '" + strconv.Itoa(ttlSeconds) + " seconds'"
|
||||
} else {
|
||||
queryExpiredate = "NULL"
|
||||
}
|
||||
result, err = db.ExecContext(parentCtx, fmt.Sprintf(query, p.metadata.TableName, queryExpiredate), params...)
|
||||
|
||||
if err != nil {
|
||||
if req.ETag != nil && *req.ETag != "" {
|
||||
return state.NewETagError(state.ETagMismatch, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -179,7 +259,6 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rows != 1 {
|
||||
return errors.New("no item was updated")
|
||||
}
|
||||
|
|
@ -187,33 +266,32 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *postgresDBAccess) BulkSet(ctx context.Context, req []state.SetRequest) error {
|
||||
p.logger.Debug("Executing BulkSet request")
|
||||
tx, err := p.db.Begin()
|
||||
func (p *PostgresDBAccess) BulkSet(parentCtx context.Context, req []state.SetRequest) error {
|
||||
tx, err := p.db.BeginTx(parentCtx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if len(req) > 0 {
|
||||
for _, s := range req {
|
||||
sa := s // Fix for gosec G601: Implicit memory aliasing in for loop.
|
||||
err = p.Set(ctx, &sa)
|
||||
for i := range req {
|
||||
err = p.doSet(parentCtx, tx, &req[i])
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns data from the database. If data does not exist for the key an empty state.GetResponse will be returned.
|
||||
func (p *postgresDBAccess) Get(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
|
||||
p.logger.Debug("Getting state value from PostgreSQL")
|
||||
func (p *PostgresDBAccess) Get(parentCtx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
|
||||
if req.Key == "" {
|
||||
return nil, errors.New("missing key in get operation")
|
||||
}
|
||||
|
|
@ -223,7 +301,15 @@ func (p *postgresDBAccess) Get(ctx context.Context, req *state.GetRequest) (*sta
|
|||
isBinary bool
|
||||
etag uint64 // Postgres uses uint32, but FormatUint requires uint64, so using uint64 directly to avoid re-allocations
|
||||
)
|
||||
err := p.db.QueryRowContext(ctx, fmt.Sprintf("SELECT value, isbinary, xmin as etag FROM %s WHERE key = $1", p.tableName), req.Key).Scan(&value, &isBinary, &etag)
|
||||
query := `SELECT
|
||||
value, isbinary, xmin AS etag
|
||||
FROM %s
|
||||
WHERE
|
||||
key = $1
|
||||
AND (expiredate IS NULL OR expiredate >= CURRENT_TIMESTAMP)`
|
||||
err := p.db.
|
||||
QueryRowContext(parentCtx, fmt.Sprintf(query, p.metadata.TableName), req.Key).
|
||||
Scan(&value, &isBinary, &etag)
|
||||
if err != nil {
|
||||
// If no rows exist, return an empty response, otherwise return the error.
|
||||
if err == sql.ErrNoRows {
|
||||
|
|
@ -261,8 +347,11 @@ func (p *postgresDBAccess) Get(ctx context.Context, req *state.GetRequest) (*sta
|
|||
}
|
||||
|
||||
// Delete removes an item from the state store.
|
||||
func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest) (err error) {
|
||||
p.logger.Debug("Deleting state value from PostgreSQL")
|
||||
func (p *PostgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest) (err error) {
|
||||
return p.doDelete(ctx, p.db, req)
|
||||
}
|
||||
|
||||
func (p *PostgresDBAccess) doDelete(parentCtx context.Context, db dbquerier, req *state.DeleteRequest) (err error) {
|
||||
if req.Key == "" {
|
||||
return errors.New("missing key in delete operation")
|
||||
}
|
||||
|
|
@ -270,7 +359,7 @@ func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest)
|
|||
var result sql.Result
|
||||
|
||||
if req.ETag == nil || *req.ETag == "" {
|
||||
result, err = p.db.ExecContext(ctx, "DELETE FROM state WHERE key = $1", req.Key)
|
||||
result, err = db.ExecContext(parentCtx, "DELETE FROM state WHERE key = $1", req.Key)
|
||||
} else {
|
||||
// Convert req.ETag to uint32 for postgres XID compatibility
|
||||
var etag64 uint64
|
||||
|
|
@ -280,7 +369,7 @@ func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest)
|
|||
}
|
||||
etag := uint32(etag64)
|
||||
|
||||
result, err = p.db.ExecContext(ctx, "DELETE FROM state WHERE key = $1 and xmin = $2", req.Key, etag)
|
||||
result, err = db.ExecContext(parentCtx, "DELETE FROM state WHERE key = $1 AND xmin = $2", req.Key, etag)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -299,92 +388,88 @@ func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest)
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *postgresDBAccess) BulkDelete(ctx context.Context, req []state.DeleteRequest) error {
|
||||
p.logger.Debug("Executing BulkDelete request")
|
||||
tx, err := p.db.Begin()
|
||||
func (p *PostgresDBAccess) BulkDelete(parentCtx context.Context, req []state.DeleteRequest) error {
|
||||
tx, err := p.db.BeginTx(parentCtx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if len(req) > 0 {
|
||||
for i := range req {
|
||||
err = p.Delete(ctx, &req[i])
|
||||
err = p.doDelete(parentCtx, tx, &req[i])
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *postgresDBAccess) ExecuteMulti(ctx context.Context, request *state.TransactionalStateRequest) error {
|
||||
p.logger.Debug("Executing PostgreSQL transaction")
|
||||
|
||||
tx, err := p.db.Begin()
|
||||
func (p *PostgresDBAccess) ExecuteMulti(parentCtx context.Context, request *state.TransactionalStateRequest) error {
|
||||
tx, err := p.db.BeginTx(parentCtx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
for _, o := range request.Operations {
|
||||
switch o.Operation {
|
||||
case state.Upsert:
|
||||
var setReq state.SetRequest
|
||||
|
||||
setReq, err = getSet(o)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.Set(ctx, &setReq)
|
||||
err = p.doSet(parentCtx, tx, &setReq)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
case state.Delete:
|
||||
var delReq state.DeleteRequest
|
||||
|
||||
delReq, err = getDelete(o)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.Delete(ctx, &delReq)
|
||||
err = p.doDelete(parentCtx, tx, &delReq)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("unsupported operation: %s", o.Operation)
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query executes a query against store.
|
||||
func (p *postgresDBAccess) Query(ctx context.Context, req *state.QueryRequest) (*state.QueryResponse, error) {
|
||||
p.logger.Debug("Getting query value from PostgreSQL")
|
||||
func (p *PostgresDBAccess) Query(parentCtx context.Context, req *state.QueryRequest) (*state.QueryResponse, error) {
|
||||
q := &Query{
|
||||
query: "",
|
||||
params: []interface{}{},
|
||||
tableName: p.tableName,
|
||||
params: []any{},
|
||||
tableName: p.metadata.TableName,
|
||||
}
|
||||
qbuilder := query.NewQueryBuilder(q)
|
||||
if err := qbuilder.BuildQuery(&req.Query); err != nil {
|
||||
return &state.QueryResponse{}, err
|
||||
}
|
||||
data, token, err := q.execute(ctx, p.logger, p.db)
|
||||
data, token, err := q.execute(parentCtx, p.logger, p.db)
|
||||
if err != nil {
|
||||
return &state.QueryResponse{}, err
|
||||
}
|
||||
|
|
@ -395,8 +480,94 @@ func (p *postgresDBAccess) Query(ctx context.Context, req *state.QueryRequest) (
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (p *PostgresDBAccess) ScheduleCleanupExpiredData(ctx context.Context) {
|
||||
if p.cleanupInterval == nil {
|
||||
return
|
||||
}
|
||||
|
||||
p.logger.Infof("Schedule expired data clean up every %d seconds", int(p.cleanupInterval.Seconds()))
|
||||
|
||||
go func() {
|
||||
ticker := time.NewTicker(*p.cleanupInterval)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err := p.CleanupExpired(ctx)
|
||||
if err != nil {
|
||||
p.logger.Errorf("Error removing expired data: %v", err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
p.logger.Debug("Stopped background cleanup of expired data")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (p *PostgresDBAccess) CleanupExpired(ctx context.Context) error {
|
||||
// Check if the last iteration was too recent
|
||||
// This performs an atomic operation, so allows coordination with other daprd processes too
|
||||
canContinue, err := p.UpdateLastCleanup(ctx, p.db, *p.cleanupInterval)
|
||||
if err != nil {
|
||||
// Log errors only
|
||||
p.logger.Warnf("Failed to read last cleanup time from database: %v", err)
|
||||
}
|
||||
if !canContinue {
|
||||
p.logger.Debug("Last cleanup was performed too recently")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Note we're not using the transaction here as we don't want this to be rolled back half-way or to lock the table unnecessarily
|
||||
// Need to use fmt.Sprintf because we can't parametrize a table name
|
||||
// Note we are not setting a timeout here as this query can take a "long" time, especially if there's no index on expiredate
|
||||
//nolint:gosec
|
||||
stmt := fmt.Sprintf(`DELETE FROM %s WHERE expiredate IS NOT NULL AND expiredate < CURRENT_TIMESTAMP`, p.metadata.TableName)
|
||||
res, err := p.db.ExecContext(ctx, stmt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute query: %w", err)
|
||||
}
|
||||
|
||||
cleaned, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to count affected rows: %w", err)
|
||||
}
|
||||
|
||||
p.logger.Infof("Removed %d expired rows", cleaned)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateLastCleanup sets the 'last-cleanup' value only if it's less than cleanupInterval.
// Returns true if the row was updated, which means that the cleanup can proceed.
func (p *PostgresDBAccess) UpdateLastCleanup(ctx context.Context, db dbquerier, cleanupInterval time.Duration) (bool, error) {
queryCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
res, err := db.ExecContext(queryCtx,
fmt.Sprintf(`INSERT INTO %[1]s (key, value)
VALUES ('last-cleanup', CURRENT_TIMESTAMP)
ON CONFLICT (key)
DO UPDATE SET value = CURRENT_TIMESTAMP
WHERE (EXTRACT('epoch' FROM CURRENT_TIMESTAMP - %[1]s.value::timestamp with time zone) * 1000)::bigint > $1`,
p.metadata.MetadataTableName),
cleanupInterval.Milliseconds()-100, // Subtract 100ms for some buffer
)
cancel()
if err != nil {
return true, fmt.Errorf("failed to execute query: %w", err)
}

n, err := res.RowsAffected()
if err != nil {
return true, fmt.Errorf("failed to count affected rows: %w", err)
}

return n > 0, nil
}
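Because the UPDATE only fires when the stored timestamp is older than the interval, the first caller wins and any other Dapr sidecar racing it sees zero affected rows and skips that cleanup pass. A hedged sketch of how this contract could be exercised; the test name and the `getInitializedDBAccess` helper are assumptions for illustration, not code from this commit.

```go
// Illustrative sketch of the coordination contract; not a test from this commit.
func TestLastCleanupCoordination(t *testing.T) {
	ctx := context.Background()
	dba := getInitializedDBAccess(t) // assumed helper returning an initialized *PostgresDBAccess

	// First caller inserts or refreshes the marker and may proceed.
	ok, err := dba.UpdateLastCleanup(ctx, dba.GetDB(), time.Hour)
	assert.NoError(t, err)
	assert.True(t, ok)

	// A second caller within the same interval matches no rows and is told to skip.
	ok, err = dba.UpdateLastCleanup(ctx, dba.GetDB(), time.Hour)
	assert.NoError(t, err)
	assert.False(t, ok)
}
```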
||||
|
||||
// Close implements io.Close.
|
||||
func (p *postgresDBAccess) Close() error {
|
||||
func (p *PostgresDBAccess) Close() error {
|
||||
if p.cancel != nil {
|
||||
p.cancel()
|
||||
p.cancel = nil
|
||||
}
|
||||
if p.db != nil {
|
||||
return p.db.Close()
|
||||
}
|
||||
|
|
@ -404,34 +575,10 @@ func (p *postgresDBAccess) Close() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *postgresDBAccess) ensureStateTable(stateTableName string) error {
|
||||
exists, err := tableExists(p.db, stateTableName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
p.logger.Info("Creating PostgreSQL state table")
|
||||
createTable := fmt.Sprintf(`CREATE TABLE %s (
|
||||
key text NOT NULL PRIMARY KEY,
|
||||
value jsonb NOT NULL,
|
||||
isbinary boolean NOT NULL,
|
||||
insertdate TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
updatedate TIMESTAMP WITH TIME ZONE NULL);`, stateTableName)
|
||||
_, err = p.db.Exec(createTable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func tableExists(db *sql.DB, tableName string) (bool, error) {
|
||||
exists := false
|
||||
err := db.QueryRow("SELECT EXISTS (SELECT FROM pg_tables where tablename = $1)", tableName).Scan(&exists)
|
||||
|
||||
return exists, err
|
||||
// GetCleanupInterval returns the cleanupInterval property.
|
||||
// This is primarily used for tests.
|
||||
func (p *PostgresDBAccess) GetCleanupInterval() *time.Duration {
|
||||
return p.cleanupInterval
|
||||
}
|
||||
|
||||
// Returns the set requests.
|
||||
|
|
|
|||
|
|
@ -18,18 +18,23 @@ import (
|
|||
"context"
|
||||
"database/sql"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/dapr/components-contrib/metadata"
|
||||
"github.com/dapr/components-contrib/state"
|
||||
"github.com/dapr/kit/logger"
|
||||
|
||||
// Blank import for pgx
|
||||
_ "github.com/jackc/pgx/v5/stdlib"
|
||||
)
|
||||
|
||||
type mocks struct {
|
||||
db *sql.DB
|
||||
mock sqlmock.Sqlmock
|
||||
pgDba *postgresDBAccess
|
||||
pgDba *PostgresDBAccess
|
||||
}
|
||||
|
||||
func TestGetSetWithWrongType(t *testing.T) {
|
||||
|
|
@ -451,7 +456,7 @@ func mockDatabase(t *testing.T) (*mocks, error) {
|
|||
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
|
||||
}
|
||||
|
||||
dba := &postgresDBAccess{
|
||||
dba := &PostgresDBAccess{
|
||||
logger: logger,
|
||||
db: db,
|
||||
}
|
||||
|
|
@ -462,3 +467,95 @@ func mockDatabase(t *testing.T) (*mocks, error) {
|
|||
pgDba: dba,
|
||||
}, err
|
||||
}
|
||||
|
||||
func TestParseMetadata(t *testing.T) {
|
||||
t.Run("missing connection string", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.Error(t, err)
|
||||
assert.ErrorIs(t, err, errMissingConnectionString)
|
||||
})
|
||||
|
||||
t.Run("has connection string", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("default table name", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, p.metadata.TableName, defaultTableName)
|
||||
})
|
||||
|
||||
t.Run("custom table name", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
"tableName": "mytable",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, p.metadata.TableName, "mytable")
|
||||
})
|
||||
|
||||
t.Run("default cleanupIntervalInSeconds", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.NoError(t, err)
|
||||
_ = assert.NotNil(t, p.cleanupInterval) &&
|
||||
assert.Equal(t, *p.cleanupInterval, defaultCleanupInternal*time.Second)
|
||||
})
|
||||
|
||||
t.Run("invalid cleanupIntervalInSeconds", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
"cleanupIntervalInSeconds": "NaN",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("positive cleanupIntervalInSeconds", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
"cleanupIntervalInSeconds": "42",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.NoError(t, err)
|
||||
_ = assert.NotNil(t, p.cleanupInterval) &&
|
||||
assert.Equal(t, *p.cleanupInterval, 42*time.Second)
|
||||
})
|
||||
|
||||
t.Run("zero cleanupIntervalInSeconds", func(t *testing.T) {
|
||||
p := &PostgresDBAccess{}
|
||||
props := map[string]string{
|
||||
"connectionString": "foo",
|
||||
"cleanupIntervalInSeconds": "0",
|
||||
}
|
||||
|
||||
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, p.cleanupInterval)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,7 +24,6 @@ import (
|
|||
|
||||
// PostgreSQL state store.
|
||||
type PostgreSQL struct {
|
||||
features []state.Feature
|
||||
logger logger.Logger
|
||||
dbaccess dbAccess
|
||||
}
|
||||
|
|
@ -40,7 +39,6 @@ func NewPostgreSQLStateStore(logger logger.Logger) state.Store {

// This unexported constructor allows injecting a dbAccess instance for unit testing.
func newPostgreSQLStateStore(logger logger.Logger, dba dbAccess) *PostgreSQL {
	return &PostgreSQL{
		features: []state.Feature{state.FeatureETag, state.FeatureTransactional, state.FeatureQueryAPI},
		logger:   logger,
		dbaccess: dba,
	}
@ -53,7 +51,7 @@ func (p *PostgreSQL) Init(metadata state.Metadata) error {

// Features returns the features available in this state store.
func (p *PostgreSQL) Features() []state.Feature {
	return p.features
	return []state.Feature{state.FeatureETag, state.FeatureTransactional, state.FeatureQueryAPI}
}

// Delete removes an entity from the store.
@ -102,10 +100,15 @@ func (p *PostgreSQL) Close() error {
	if p.dbaccess != nil {
		return p.dbaccess.Close()
	}

	return nil
}

// Returns the dbaccess property.
// This method is used in tests.
func (p *PostgreSQL) GetDBAccess() dbAccess {
	return p.dbaccess
}

func (p *PostgreSQL) GetComponentMetadata() map[string]string {
	metadataStruct := postgresMetadataStruct{}
	metadataInfo := map[string]string{}
@ -49,7 +49,7 @@ func TestPostgreSQLIntegration(t *testing.T) {
	})

	metadata := state.Metadata{
		Base: metadata.Base{Properties: map[string]string{connectionStringKey: connectionString}},
		Base: metadata.Base{Properties: map[string]string{"connectionString": connectionString}},
	}

	pgs := NewPostgreSQLStateStore(logger.NewLogger("test")).(*PostgreSQL)

@ -62,11 +62,6 @@ func TestPostgreSQLIntegration(t *testing.T) {
		t.Fatal(error)
	}

	t.Run("Create table succeeds", func(t *testing.T) {
		t.Parallel()
		testCreateTable(t, pgs.dbaccess.(*postgresDBAccess))
	})

	t.Run("Get Set Delete one item", func(t *testing.T) {
		t.Parallel()
		setGetUpdateDeleteOneItem(t, pgs)

@ -161,33 +156,6 @@ func setGetUpdateDeleteOneItem(t *testing.T, pgs *PostgreSQL) {
	deleteItem(t, pgs, key, getResponse.ETag)
}

// testCreateTable tests the ability to create the state table.
func testCreateTable(t *testing.T, dba *postgresDBAccess) {
	tableName := "test_state"

	// Drop the table if it already exists
	exists, err := tableExists(dba.db, tableName)
	assert.Nil(t, err)
	if exists {
		dropTable(t, dba.db, tableName)
	}

	// Create the state table and test for its existence
	err = dba.ensureStateTable(tableName)
	assert.Nil(t, err)
	exists, err = tableExists(dba.db, tableName)
	assert.Nil(t, err)
	assert.True(t, exists)

	// Drop the state table
	dropTable(t, dba.db, tableName)
}

func dropTable(t *testing.T, db *sql.DB, tableName string) {
	_, err := db.Exec(fmt.Sprintf("DROP TABLE %s", tableName))
	assert.Nil(t, err)
}

func deleteItemThatDoesNotExist(t *testing.T, pgs *PostgreSQL) {
	// Delete the item with a key not in the store
	deleteReq := &state.DeleteRequest{

@ -477,7 +445,7 @@ func testInitConfiguration(t *testing.T) {
	tests := []struct {
		name        string
		props       map[string]string
		expectedErr string
		expectedErr error
	}{
		{
			name: "Empty",

@ -486,8 +454,8 @@ func testInitConfiguration(t *testing.T) {
		},
		{
			name:        "Valid connection string",
			props:       map[string]string{connectionStringKey: getConnectionString()},
			expectedErr: "",
			props:       map[string]string{"connectionString": getConnectionString()},
			expectedErr: nil,
		},
	}

@ -501,11 +469,11 @@ func testInitConfiguration(t *testing.T) {
			}

			err := p.Init(metadata)
			if tt.expectedErr == "" {
				assert.Nil(t, err)
			if tt.expectedErr == nil {
				assert.NoError(t, err)
			} else {
				assert.NotNil(t, err)
				assert.Equal(t, err.Error(), tt.expectedErr)
				assert.Error(t, err)
				assert.ErrorIs(t, err, tt.expectedErr)
			}
		})
	}

@ -107,7 +107,7 @@ func createPostgreSQL(t *testing.T) *PostgreSQL {
	assert.NotNil(t, pgs)

	metadata := &state.Metadata{
		Base: metadata.Base{Properties: map[string]string{connectionStringKey: fakeConnectionString}},
		Base: metadata.Base{Properties: map[string]string{"connectionString": fakeConnectionString}},
	}

	err := pgs.Init(*metadata)
@ -20,7 +20,6 @@ import (
	"strconv"
	"strings"

	"github.com/go-redis/redis/v8"
	jsoniter "github.com/json-iterator/go"

	"github.com/dapr/components-contrib/contenttype"

@ -92,7 +91,7 @@ const (

// StateStore is a Redis state store.
type StateStore struct {
	state.DefaultBulkStore
	client         redis.UniversalClient
	client         rediscomponent.RedisClient
	clientSettings *rediscomponent.Settings
	json           jsoniter.API
	metadata       rediscomponent.Metadata
@ -119,7 +118,7 @@ func NewRedisStateStore(logger logger.Logger) state.Store {
}

func (r *StateStore) Ping() error {
	if _, err := r.client.Ping(context.Background()).Result(); err != nil {
	if _, err := r.client.PingResult(context.Background()); err != nil {
		return fmt.Errorf("redis store: error connecting to redis at %s: %s", r.clientSettings.Host, err)
	}

@ -147,7 +146,7 @@ func (r *StateStore) Init(metadata state.Metadata) error {

	r.ctx, r.cancel = context.WithCancel(context.Background())

	if _, err = r.client.Ping(r.ctx).Result(); err != nil {
	if _, err = r.client.PingResult(r.ctx); err != nil {
		return fmt.Errorf("redis store: error connecting to redis at %s: %v", r.clientSettings.Host, err)
	}
@ -168,7 +167,7 @@ func (r *StateStore) Features() []state.Feature {
}

func (r *StateStore) getConnectedSlaves() (int, error) {
	res, err := r.client.Do(r.ctx, "INFO", "replication").Result()
	res, err := r.client.DoRead(r.ctx, "INFO", "replication")
	if err != nil {
		return 0, err
	}
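getConnectedSlaves goes on to scan the INFO replication payload returned by DoRead; a rough sketch of that scan, assuming the standard `connected_slaves:<n>` line emitted by Redis (the store's own parsing may differ in detail):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseConnectedSlaves pulls the replica count out of an "INFO replication"
// response. Illustrative only.
func parseConnectedSlaves(info string) int {
	for _, line := range strings.Split(info, "\n") {
		if strings.HasPrefix(line, "connected_slaves:") {
			if n, err := strconv.Atoi(strings.TrimSpace(strings.TrimPrefix(line, "connected_slaves:"))); err == nil {
				return n
			}
		}
	}
	return 0
}

func main() {
	fmt.Println(parseConnectedSlaves("role:master\r\nconnected_slaves:2\r\n")) // 2
}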
@ -209,12 +208,12 @@ func (r *StateStore) Delete(ctx context.Context, req *state.DeleteRequest) error
|
|||
}
|
||||
|
||||
var delQuery string
|
||||
if contentType, ok := req.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType {
|
||||
if contentType, ok := req.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType && rediscomponent.ClientHasJSONSupport(r.client) {
|
||||
delQuery = delJSONQuery
|
||||
} else {
|
||||
delQuery = delDefaultQuery
|
||||
}
|
||||
_, err = r.client.Do(ctx, "EVAL", delQuery, 1, req.Key, *req.ETag).Result()
|
||||
err = r.client.DoWrite(ctx, "EVAL", delQuery, 1, req.Key, *req.ETag)
|
||||
if err != nil {
|
||||
return state.NewETagError(state.ETagMismatch, err)
|
||||
}
|
||||
|
|
@ -222,9 +221,8 @@ func (r *StateStore) Delete(ctx context.Context, req *state.DeleteRequest) error
|
|||
return nil
|
||||
}
|
||||
|
||||
// directGet performs a plain GET of the key; kept for backward compatibility.
|
||||
func (r *StateStore) directGet(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
|
||||
res, err := r.client.Do(r.ctx, "GET", req.Key).Result()
|
||||
res, err := r.client.DoRead(ctx, "GET", req.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -241,14 +239,23 @@ func (r *StateStore) directGet(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
}

func (r *StateStore) getDefault(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
	res, err := r.client.Do(ctx, "HGETALL", req.Key).Result() // Prefer values with ETags
	res, err := r.client.DoRead(ctx, "HGETALL", req.Key) // Prefer values with ETags
	if err != nil {
		return r.directGet(ctx, req) // Falls back to original get for backward compats.
	}
	if res == nil {
		return &state.GetResponse{}, nil
	}
	vals := res.([]interface{})
	vals, ok := res.([]interface{})
	if !ok {
		// we retrieved a JSON value from a non-JSON store
		valMap := res.(map[interface{}]interface{})
		// convert valMap to []interface{}
		vals = make([]interface{}, 0, len(valMap))
		for k, v := range valMap {
			vals = append(vals, k, v)
		}
	}
	if len(vals) == 0 {
		return &state.GetResponse{}, nil
	}
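The flattened `vals` slice is then consumed as alternating field/value pairs; a minimal sketch of that walk, using the `data` and `version` hash fields the store writes (helper name hypothetical):

package main

import "fmt"

// pickDataVersion walks an HGETALL-style reply flattened into
// [field1, value1, field2, value2, ...] and picks out data and version.
func pickDataVersion(vals []interface{}) (data, version string) {
	for i := 0; i+1 < len(vals); i += 2 {
		switch fmt.Sprint(vals[i]) {
		case "data":
			data = fmt.Sprint(vals[i+1])
		case "version":
			version = fmt.Sprint(vals[i+1])
		}
	}
	return data, version
}

func main() {
	data, version := pickDataVersion([]interface{}{"data", `"deathstar"`, "version", "1"})
	fmt.Println(data, version) // "deathstar" 1
}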
@ -265,7 +272,7 @@ func (r *StateStore) getDefault(ctx context.Context, req *state.GetRequest) (*st
|
|||
}
|
||||
|
||||
func (r *StateStore) getJSON(req *state.GetRequest) (*state.GetResponse, error) {
|
||||
res, err := r.client.Do(r.ctx, "JSON.GET", req.Key).Result()
|
||||
res, err := r.client.DoRead(r.ctx, "JSON.GET", req.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -303,7 +310,7 @@ func (r *StateStore) getJSON(req *state.GetRequest) (*state.GetResponse, error)
|
|||
|
||||
// Get retrieves state from redis with a key.
|
||||
func (r *StateStore) Get(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
|
||||
if contentType, ok := req.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType {
|
||||
if contentType, ok := req.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType && rediscomponent.ClientHasJSONSupport(r.client) {
|
||||
return r.getJSON(req)
|
||||
}
|
||||
|
||||
|
|
@ -341,7 +348,7 @@ func (r *StateStore) Set(ctx context.Context, req *state.SetRequest) error {
|
|||
|
||||
var bt []byte
|
||||
var setQuery string
|
||||
if contentType, ok := req.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType {
|
||||
if contentType, ok := req.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType && rediscomponent.ClientHasJSONSupport(r.client) {
|
||||
setQuery = setJSONQuery
|
||||
bt, _ = utils.Marshal(&jsonEntry{Data: req.Value}, r.json.Marshal)
|
||||
} else {
|
||||
|
|
@ -349,7 +356,7 @@ func (r *StateStore) Set(ctx context.Context, req *state.SetRequest) error {
|
|||
bt, _ = utils.Marshal(req.Value, r.json.Marshal)
|
||||
}
|
||||
|
||||
err = r.client.Do(ctx, "EVAL", setQuery, 1, req.Key, ver, bt, firstWrite).Err()
|
||||
err = r.client.DoWrite(ctx, "EVAL", setQuery, 1, req.Key, ver, bt, firstWrite)
|
||||
if err != nil {
|
||||
if req.ETag != nil {
|
||||
return state.NewETagError(state.ETagMismatch, err)
|
||||
|
|
@ -359,21 +366,21 @@ func (r *StateStore) Set(ctx context.Context, req *state.SetRequest) error {
|
|||
}
|
||||
|
||||
if ttl != nil && *ttl > 0 {
|
||||
_, err = r.client.Do(ctx, "EXPIRE", req.Key, *ttl).Result()
|
||||
err = r.client.DoWrite(ctx, "EXPIRE", req.Key, *ttl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set key %s ttl: %s", req.Key, err)
|
||||
}
|
||||
}
|
||||
|
||||
if ttl != nil && *ttl <= 0 {
|
||||
_, err = r.client.Do(ctx, "PERSIST", req.Key).Result()
|
||||
err = r.client.DoWrite(ctx, "PERSIST", req.Key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to persist key %s: %s", req.Key, err)
|
||||
}
|
||||
}
|
||||
|
||||
if req.Options.Consistency == state.Strong && r.replicas > 0 {
|
||||
_, err = r.client.Do(ctx, "WAIT", r.replicas, 1000).Result()
|
||||
err = r.client.DoWrite(ctx, "WAIT", r.replicas, 1000)
|
||||
if err != nil {
|
||||
return fmt.Errorf("redis waiting for %v replicas to acknowledge write, err: %s", r.replicas, err.Error())
|
||||
}
|
||||
|
|
@ -386,7 +393,7 @@ func (r *StateStore) Set(ctx context.Context, req *state.SetRequest) error {
|
|||
func (r *StateStore) Multi(ctx context.Context, request *state.TransactionalStateRequest) error {
|
||||
var setQuery, delQuery string
|
||||
var isJSON bool
|
||||
if contentType, ok := request.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType {
|
||||
if contentType, ok := request.Metadata[daprmetadata.ContentType]; ok && contentType == contenttype.JSONContentType && rediscomponent.ClientHasJSONSupport(r.client) {
|
||||
isJSON = true
|
||||
setQuery = setJSONQuery
|
||||
delQuery = delJSONQuery
|
||||
|
|
@ -434,7 +441,7 @@ func (r *StateStore) Multi(ctx context.Context, request *state.TransactionalStat
|
|||
}
|
||||
}
|
||||
|
||||
_, err := pipe.Exec(ctx)
|
||||
err := pipe.Exec(ctx)
|
||||
|
||||
return err
|
||||
}
|
||||
|
|
@ -442,15 +449,15 @@ func (r *StateStore) Multi(ctx context.Context, request *state.TransactionalStat
|
|||
func (r *StateStore) registerSchemas() error {
|
||||
for name, elem := range r.querySchemas {
|
||||
r.logger.Infof("redis: create query index %s", name)
|
||||
if err := r.client.Do(r.ctx, elem.schema...).Err(); err != nil {
|
||||
if err := r.client.DoWrite(r.ctx, elem.schema...); err != nil {
|
||||
if err.Error() != "Index already exists" {
|
||||
return err
|
||||
}
|
||||
r.logger.Infof("redis: drop stale query index %s", name)
|
||||
if err = r.client.Do(r.ctx, "FT.DROPINDEX", name).Err(); err != nil {
|
||||
if err = r.client.DoWrite(r.ctx, "FT.DROPINDEX", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = r.client.Do(r.ctx, elem.schema...).Err(); err != nil {
|
||||
if err = r.client.DoWrite(r.ctx, elem.schema...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
@ -509,6 +516,9 @@ func (r *StateStore) parseTTL(req *state.SetRequest) (*int, error) {

// Query executes a query against store.
func (r *StateStore) Query(ctx context.Context, req *state.QueryRequest) (*state.QueryResponse, error) {
	if !rediscomponent.ClientHasJSONSupport(r.client) {
		return nil, fmt.Errorf("redis-json server support is required for query capability")
	}
	indexName, ok := daprmetadata.TryGetQueryIndexName(req.Metadata)
	if !ok {
		return nil, fmt.Errorf("query index not found")
@ -20,10 +20,9 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
rediscomponent "github.com/dapr/components-contrib/internal/component/redis"
|
||||
"github.com/dapr/components-contrib/state"
|
||||
"github.com/dapr/components-contrib/state/query"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
var ErrMultipleSortBy error = errors.New("multiple SORTBY steps are not allowed. Sort multiple fields in a single step")
|
||||
|
|
@ -190,9 +189,9 @@ func (q *Query) Finalize(filters string, qq *query.Query) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (q *Query) execute(ctx context.Context, client redis.UniversalClient) ([]state.QueryItem, string, error) {
|
||||
func (q *Query) execute(ctx context.Context, client rediscomponent.RedisClient) ([]state.QueryItem, string, error) {
|
||||
query := append(append([]interface{}{"FT.SEARCH", q.schemaName}, q.query...), "RETURN", "2", "$.data", "$.version")
|
||||
ret, err := client.Do(ctx, query...).Result()
|
||||
ret, err := client.DoRead(ctx, query...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -239,7 +239,7 @@ func TestTransactionalUpsert(t *testing.T) {
|
|||
})
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err := c.Do(context.Background(), "HGETALL", "weapon").Result()
|
||||
res, err := c.DoRead(context.Background(), "HGETALL", "weapon")
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
vals := res.([]interface{})
|
||||
|
|
@ -248,15 +248,15 @@ func TestTransactionalUpsert(t *testing.T) {
|
|||
assert.Equal(t, ptr.Of("1"), version)
|
||||
assert.Equal(t, `"deathstar"`, data)
|
||||
|
||||
res, err = c.Do(context.Background(), "TTL", "weapon").Result()
|
||||
res, err = c.DoRead(context.Background(), "TTL", "weapon")
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(-1), res)
|
||||
|
||||
res, err = c.Do(context.Background(), "TTL", "weapon2").Result()
|
||||
res, err = c.DoRead(context.Background(), "TTL", "weapon2")
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(123), res)
|
||||
|
||||
res, err = c.Do(context.Background(), "TTL", "weapon3").Result()
|
||||
res, err = c.DoRead(context.Background(), "TTL", "weapon3")
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(-1), res)
|
||||
}
|
||||
|
|
@ -290,7 +290,7 @@ func TestTransactionalDelete(t *testing.T) {
|
|||
})
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err := c.Do(context.Background(), "HGETALL", "weapon").Result()
|
||||
res, err := c.DoRead(context.Background(), "HGETALL", "weapon")
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
vals := res.([]interface{})
|
||||
|
|
@ -335,7 +335,7 @@ func TestRequestsWithGlobalTTL(t *testing.T) {
|
|||
Key: "weapon100",
|
||||
Value: "deathstar100",
|
||||
})
|
||||
ttl, _ := ss.client.TTL(ss.ctx, "weapon100").Result()
|
||||
ttl, _ := ss.client.TTLResult(ss.ctx, "weapon100")
|
||||
|
||||
assert.Equal(t, time.Duration(globalTTLInSeconds)*time.Second, ttl)
|
||||
})
|
||||
|
|
@ -349,7 +349,7 @@ func TestRequestsWithGlobalTTL(t *testing.T) {
|
|||
"ttlInSeconds": strconv.Itoa(requestTTL),
|
||||
},
|
||||
})
|
||||
ttl, _ := ss.client.TTL(ss.ctx, "weapon100").Result()
|
||||
ttl, _ := ss.client.TTLResult(ss.ctx, "weapon100")
|
||||
|
||||
assert.Equal(t, time.Duration(requestTTL)*time.Second, ttl)
|
||||
})
|
||||
|
|
@ -388,7 +388,7 @@ func TestRequestsWithGlobalTTL(t *testing.T) {
|
|||
})
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err := c.Do(context.Background(), "HGETALL", "weapon").Result()
|
||||
res, err := c.DoRead(context.Background(), "HGETALL", "weapon")
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
vals := res.([]interface{})
|
||||
|
|
@ -397,15 +397,15 @@ func TestRequestsWithGlobalTTL(t *testing.T) {
|
|||
assert.Equal(t, ptr.Of("1"), version)
|
||||
assert.Equal(t, `"deathstar"`, data)
|
||||
|
||||
res, err = c.Do(context.Background(), "TTL", "weapon").Result()
|
||||
res, err = c.DoRead(context.Background(), "TTL", "weapon")
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(globalTTLInSeconds), res)
|
||||
|
||||
res, err = c.Do(context.Background(), "TTL", "weapon2").Result()
|
||||
res, err = c.DoRead(context.Background(), "TTL", "weapon2")
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(123), res)
|
||||
|
||||
res, err = c.Do(context.Background(), "TTL", "weapon3").Result()
|
||||
res, err = c.DoRead(context.Background(), "TTL", "weapon3")
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(-1), res)
|
||||
})
|
||||
|
|
@ -432,7 +432,7 @@ func TestSetRequestWithTTL(t *testing.T) {
|
|||
},
|
||||
})
|
||||
|
||||
ttl, _ := ss.client.TTL(ss.ctx, "weapon100").Result()
|
||||
ttl, _ := ss.client.TTLResult(ss.ctx, "weapon100")
|
||||
|
||||
assert.Equal(t, time.Duration(ttlInSeconds)*time.Second, ttl)
|
||||
})
|
||||
|
|
@ -443,7 +443,7 @@ func TestSetRequestWithTTL(t *testing.T) {
|
|||
Value: "deathstar200",
|
||||
})
|
||||
|
||||
ttl, _ := ss.client.TTL(ss.ctx, "weapon200").Result()
|
||||
ttl, _ := ss.client.TTLResult(ss.ctx, "weapon200")
|
||||
|
||||
assert.Equal(t, time.Duration(-1), ttl)
|
||||
})
|
||||
|
|
@ -453,7 +453,7 @@ func TestSetRequestWithTTL(t *testing.T) {
|
|||
Key: "weapon300",
|
||||
Value: "deathstar300",
|
||||
})
|
||||
ttl, _ := ss.client.TTL(ss.ctx, "weapon300").Result()
|
||||
ttl, _ := ss.client.TTLResult(ss.ctx, "weapon300")
|
||||
assert.Equal(t, time.Duration(-1), ttl)
|
||||
|
||||
// make the key no longer persistent
|
||||
|
|
@ -465,7 +465,7 @@ func TestSetRequestWithTTL(t *testing.T) {
|
|||
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
|
||||
},
|
||||
})
|
||||
ttl, _ = ss.client.TTL(ss.ctx, "weapon300").Result()
|
||||
ttl, _ = ss.client.TTLResult(ss.ctx, "weapon300")
|
||||
assert.Equal(t, time.Duration(ttlInSeconds)*time.Second, ttl)
|
||||
|
||||
// make the key persistent again
|
||||
|
|
@ -476,7 +476,7 @@ func TestSetRequestWithTTL(t *testing.T) {
|
|||
"ttlInSeconds": strconv.Itoa(-1),
|
||||
},
|
||||
})
|
||||
ttl, _ = ss.client.TTL(ss.ctx, "weapon300").Result()
|
||||
ttl, _ = ss.client.TTLResult(ss.ctx, "weapon300")
|
||||
assert.Equal(t, time.Duration(-1), ttl)
|
||||
})
|
||||
}
|
||||
|
|
@ -508,7 +508,7 @@ func TestTransactionalDeleteNoEtag(t *testing.T) {
|
|||
})
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
res, err := c.Do(context.Background(), "HGETALL", "weapon100").Result()
|
||||
res, err := c.DoRead(context.Background(), "HGETALL", "weapon100")
|
||||
assert.Equal(t, nil, err)
|
||||
|
||||
vals := res.([]interface{})
|
||||
|
|
@ -532,7 +532,7 @@ func TestGetMetadata(t *testing.T) {
	assert.Equal(t, metadataInfo["idleCheckFrequency"], "redis.Duration")
}

func setupMiniredis() (*miniredis.Miniredis, *redis.Client) {
func setupMiniredis() (*miniredis.Miniredis, rediscomponent.RedisClient) {
	s, err := miniredis.Run()
	if err != nil {
		panic(err)

@ -542,5 +542,5 @@ func setupMiniredis() (*miniredis.Miniredis, *redis.Client) {
		DB: defaultDB,
	}

	return s, redis.NewClient(opts)
	return s, rediscomponent.ClientFromV8Client(redis.NewClient(opts))
}
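The `rediscomponent.RedisClient` type itself is not part of this diff; inferred from the call sites above (PingResult, DoRead, DoWrite, TTLResult, ClientFromV8Client), its surface looks roughly like the sketch below. Method names come from this change, but the exact signatures and any additional methods are assumptions, not the declaration in internal/component/redis:

package rediscomponent

import (
	"context"
	"time"
)

// RedisClient is an inferred sketch of the abstraction the state store,
// binding, and tests now program against, so different go-redis client
// versions can be swapped behind one interface.
type RedisClient interface {
	PingResult(ctx context.Context) (string, error)
	DoRead(ctx context.Context, args ...interface{}) (interface{}, error)
	DoWrite(ctx context.Context, args ...interface{}) error
	TTLResult(ctx context.Context, key string) (time.Duration, error)
}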
@ -0,0 +1,40 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"fmt"
	"math"
	"strconv"
)

// Key used for "ttlInSeconds" in metadata.
const MetadataTTLKey = "ttlInSeconds"

// ParseTTL parses the "ttlInSeconds" metadata property.
func ParseTTL(requestMetadata map[string]string) (*int, error) {
	val, found := requestMetadata[MetadataTTLKey]
	if found && val != "" {
		parsedVal, err := strconv.ParseInt(val, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("incorrect value for metadata '%s': %w", MetadataTTLKey, err)
		}
		if parsedVal < -1 || parsedVal > math.MaxInt32 {
			return nil, fmt.Errorf("incorrect value for metadata '%s': must be -1 or greater", MetadataTTLKey)
		}
		i := int(parsedVal)
		return &i, nil
	}
	return nil, nil
}
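A short usage sketch for the new helper. The import path is an assumption; the package name and behaviour follow from the file above:

package main

import (
	"fmt"

	"github.com/dapr/components-contrib/internal/utils" // assumed location of the package above
)

func main() {
	// MetadataTTLKey is "ttlInSeconds"; -1 is accepted and means "never expire".
	ttl, err := utils.ParseTTL(map[string]string{"ttlInSeconds": "120"})
	if err != nil {
		panic(err)
	}
	if ttl != nil {
		fmt.Printf("expire after %d seconds\n", *ttl) // expire after 120 seconds
	}
}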
@ -0,0 +1,74 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"math"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestParseTTL(t *testing.T) {
	t.Run("TTL Not an integer", func(t *testing.T) {
		ttlInSeconds := "not an integer"
		ttl, err := ParseTTL(map[string]string{
			MetadataTTLKey: ttlInSeconds,
		})
		require.Error(t, err)
		assert.Nil(t, ttl)
	})

	t.Run("TTL specified with wrong key", func(t *testing.T) {
		ttlInSeconds := 12345
		ttl, err := ParseTTL(map[string]string{
			"expirationTime": strconv.Itoa(ttlInSeconds),
		})
		require.NoError(t, err)
		assert.Nil(t, ttl)
	})

	t.Run("TTL is a number", func(t *testing.T) {
		ttlInSeconds := 12345
		ttl, err := ParseTTL(map[string]string{
			MetadataTTLKey: strconv.Itoa(ttlInSeconds),
		})
		require.NoError(t, err)
		assert.Equal(t, *ttl, ttlInSeconds)
	})

	t.Run("TTL not set", func(t *testing.T) {
		ttl, err := ParseTTL(map[string]string{})
		require.NoError(t, err)
		assert.Nil(t, ttl)
	})

	t.Run("TTL < -1", func(t *testing.T) {
		ttl, err := ParseTTL(map[string]string{
			MetadataTTLKey: "-3",
		})
		require.Error(t, err)
		assert.Nil(t, ttl)
	})

	t.Run("TTL bigger than 32-bit", func(t *testing.T) {
		ttl, err := ParseTTL(map[string]string{
			MetadataTTLKey: strconv.FormatInt(math.MaxInt32+1, 10),
		})
		require.Error(t, err)
		assert.Nil(t, ttl)
	})
}
@ -1,98 +0,0 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Outpub binding topic
|
||||
value: neworder
|
||||
- name: consumeRetryEnabled # enable consumer retry
|
||||
value: true
|
||||
- name: brokers
|
||||
value: localhost:19094,localhost:29094,localhost:39094
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: authType
|
||||
value: mtls
|
||||
- name: caCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
- name: clientCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
|
||||
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
|
||||
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
|
||||
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
|
||||
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
|
||||
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
|
||||
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
|
||||
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
|
||||
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
|
||||
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
|
||||
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
|
||||
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
|
||||
-----END CERTIFICATE-----
|
||||
- name: clientKey
|
||||
value: |
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
|
||||
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
|
||||
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
|
||||
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
|
||||
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
|
||||
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
|
||||
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
|
||||
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
|
||||
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
|
||||
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
|
||||
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
|
||||
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
|
||||
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
|
||||
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
|
||||
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
|
||||
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
|
||||
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
|
||||
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
|
||||
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
|
||||
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
|
||||
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
|
||||
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
|
||||
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
|
||||
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
|
||||
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Outpub binding topic
|
||||
value: neworder
|
||||
- name: consumeRetryEnabled # enable consumer retry
|
||||
value: true
|
||||
- name: brokers
|
||||
value: localhost:19093,localhost:29093,localhost:39093
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: authType
|
||||
value: "oidc"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: oidcTokenEndpoint
|
||||
value: https://localhost:4443/oauth2/token
|
||||
- name: oidcClientID
|
||||
value: "dapr"
|
||||
- name: oidcClientSecret
|
||||
value: "dapr-test"
|
||||
- name: oidcScopes
|
||||
value: openid,kafka
|
||||
- name: caCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,24 +1,26 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Output binding topic
|
||||
value: neworder
|
||||
- name: brokers
|
||||
value: localhost:19092,localhost:29092,localhost:39092
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification1
|
||||
- name: authType
|
||||
value: "none"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Output binding topic
|
||||
value: neworder
|
||||
- name: consumeRetryEnabled # enable consumer retry
|
||||
value: true
|
||||
- name: brokers
|
||||
value: localhost:19092,localhost:29092,localhost:39092
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification1
|
||||
- name: authType
|
||||
value: "none"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
|
|
|
|||
|
|
@ -1,26 +1,26 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Output binding topic
|
||||
value: neworder
|
||||
- name: consumeRetryEnabled # enable consumer retry
|
||||
value: true
|
||||
- name: brokers
|
||||
value: localhost:19092,localhost:29092,localhost:39092
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification1
|
||||
- name: authType
|
||||
value: "none"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Output binding topic
|
||||
value: neworder
|
||||
- name: consumeRetryEnabled # enable consumer retry
|
||||
value: true
|
||||
- name: brokers
|
||||
value: localhost:19092,localhost:29092,localhost:39092
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: authType
|
||||
value: "none"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
|
|
@ -1,96 +0,0 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Outpub binding topic
|
||||
value: neworder
|
||||
- name: brokers
|
||||
value: localhost:19094,localhost:29094,localhost:39094
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: authType
|
||||
value: mtls
|
||||
- name: caCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
- name: clientCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
|
||||
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
|
||||
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
|
||||
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
|
||||
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
|
||||
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
|
||||
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
|
||||
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
|
||||
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
|
||||
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
|
||||
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
|
||||
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
|
||||
-----END CERTIFICATE-----
|
||||
- name: clientKey
|
||||
value: |
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
|
||||
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
|
||||
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
|
||||
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
|
||||
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
|
||||
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
|
||||
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
|
||||
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
|
||||
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
|
||||
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
|
||||
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
|
||||
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
|
||||
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
|
||||
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
|
||||
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
|
||||
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
|
||||
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
|
||||
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
|
||||
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
|
||||
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
|
||||
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
|
||||
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
|
||||
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
|
||||
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
|
||||
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: bindings.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: topics # Input binding topic
|
||||
value: neworder
|
||||
- name: publishTopic # Outpub binding topic
|
||||
value: neworder
|
||||
- name: brokers
|
||||
value: localhost:19093,localhost:29093,localhost:39093
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: authType
|
||||
value: "oidc"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: oidcTokenEndpoint
|
||||
value: https://localhost:4443/oauth2/token
|
||||
- name: oidcClientID
|
||||
value: "dapr"
|
||||
- name: oidcClientSecret
|
||||
value: "dapr-test"
|
||||
- name: oidcScopes
|
||||
value: openid,kafka
|
||||
- name: caCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,191 +1,68 @@
|
|||
version: "3.7"
|
||||
services:
|
||||
zookeeper:
|
||||
image: confluentinc/cp-zookeeper:5.4.0
|
||||
hostname: zookeeper
|
||||
container_name: zookeeper
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
kafka1:
|
||||
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
|
||||
hostname: kafka-1
|
||||
container_name: kafka-1
|
||||
read_only: false
|
||||
entrypoint:
|
||||
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
|
||||
user: root
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "19094:19094"
|
||||
- "19093:19093"
|
||||
- "19092:19092"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./strimzi-ca-certs
|
||||
target: /opt/kafka/cluster-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-broker-certs
|
||||
target: /opt/kafka/broker-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-client-ca
|
||||
target: /opt/kafka/client-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-mtls-9094-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/oauth-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-kafka1-config
|
||||
target: /opt/kafka/custom-config
|
||||
read_only: true
|
||||
- type: volume
|
||||
source: kafka1-data
|
||||
target: /var/lib/kafka/data
|
||||
environment:
|
||||
KAFKA_METRICS_ENABLED: "false"
|
||||
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
|
||||
KAFKA_HEAP_OPTS: "-Xms128M"
|
||||
|
||||
kafka2:
|
||||
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
|
||||
hostname: kafka-2
|
||||
container_name: kafka-2
|
||||
read_only: false
|
||||
entrypoint:
|
||||
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
|
||||
user: root
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "29094:29094"
|
||||
- "29093:29093"
|
||||
- "29092:29092"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./strimzi-ca-certs
|
||||
target: /opt/kafka/cluster-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-broker-certs
|
||||
target: /opt/kafka/broker-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-client-ca
|
||||
target: /opt/kafka/client-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-mtls-9094-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/oauth-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-kafka2-config
|
||||
target: /opt/kafka/custom-config
|
||||
read_only: true
|
||||
- type: volume
|
||||
source: kafka2-data
|
||||
target: /var/lib/kafka/data
|
||||
environment:
|
||||
KAFKA_METRICS_ENABLED: "false"
|
||||
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
|
||||
KAFKA_HEAP_OPTS: "-Xms128M"
|
||||
|
||||
kafka3:
|
||||
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
|
||||
hostname: kafka-3
|
||||
container_name: kafka-3
|
||||
read_only: false
|
||||
entrypoint:
|
||||
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
|
||||
user: root
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "39094:39094"
|
||||
- "39093:39093"
|
||||
- "39092:39092"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./strimzi-ca-certs
|
||||
target: /opt/kafka/cluster-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-broker-certs
|
||||
target: /opt/kafka/broker-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-client-ca
|
||||
target: /opt/kafka/client-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-mtls-9094-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/oauth-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-kafka3-config
|
||||
target: /opt/kafka/custom-config
|
||||
read_only: true
|
||||
- type: volume
|
||||
source: kafka3-data
|
||||
target: /var/lib/kafka/data
|
||||
environment:
|
||||
KAFKA_METRICS_ENABLED: "false"
|
||||
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
|
||||
KAFKA_HEAP_OPTS: "-Xms128M"
|
||||
hydra:
|
||||
image: oryd/hydra:v1.10.6-sqlite
|
||||
hostname: hydra
|
||||
container_name: hydra
|
||||
ports:
|
||||
- "4443:4443"
|
||||
- "4444:4444"
|
||||
read_only: false
|
||||
entrypoint: hydra serve all -c /config/config.yaml --sqa-opt-out
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./oauth-config
|
||||
target: /config
|
||||
read_only: true
|
||||
hydra-config:
|
||||
image: oryd/hydra:v1.10.6-sqlite
|
||||
hostname: hydra-config
|
||||
container_name: hydra-config
|
||||
depends_on:
|
||||
- hydra
|
||||
entrypoint: |
|
||||
/bin/sh -c "sleep 20;hydra clients create --skip-tls-verify -g client_credentials --id dapr -n dapr -r token -a openid,kafka --secret dapr-test; hydra clients create --skip-tls-verify -g client_credentials --id kafka -n kafka -r token -a openid --secret dapr-test"
|
||||
environment:
|
||||
HYDRA_ADMIN_URL: https://hydra:4444
|
||||
volumes:
|
||||
kafka1-data: {}
|
||||
kafka2-data: {}
|
||||
kafka3-data: {}
|
||||
version: "3.7"
|
||||
services:
|
||||
zookeeper:
|
||||
image: confluentinc/cp-zookeeper:7.3.0
|
||||
hostname: zookeeper
|
||||
container_name: zookeeper
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
|
||||
kafka1:
|
||||
image: confluentinc/cp-server:7.3.0
|
||||
hostname: kafka1
|
||||
container_name: kafka1
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "19092:19092"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_NUM_PARTITIONS: 10
|
||||
|
||||
kafka2:
|
||||
image: confluentinc/cp-server:7.3.0
|
||||
hostname: kafka2
|
||||
container_name: kafka2
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "29092:29092"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 2
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_NUM_PARTITIONS: 10
|
||||
|
||||
kafka3:
|
||||
image: confluentinc/cp-server:7.3.0
|
||||
hostname: kafka3
|
||||
container_name: kafka3
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "39092:39092"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 3
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_NUM_PARTITIONS: 10
|
||||
|
|
@ -1,339 +0,0 @@
|
|||
/*
|
||||
Copyright 2021 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kafka_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/multierr"
|
||||
|
||||
// Pub/Sub.
|
||||
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
|
||||
// Dapr runtime and Go-SDK
|
||||
"github.com/dapr/dapr/pkg/runtime"
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
"github.com/dapr/go-sdk/service/common"
|
||||
kit_retry "github.com/dapr/kit/retry"
|
||||
|
||||
// Certification testing runnables
|
||||
"github.com/dapr/components-contrib/tests/certification/embedded"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/app"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/network"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/retry"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/simulate"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/watcher"
|
||||
)
|
||||
|
||||
func TestKafka_with_retry(t *testing.T) {
|
||||
// For Kafka, we should ensure messages are received in order.
|
||||
consumerGroup1 := watcher.NewOrdered()
|
||||
// This watcher is across multiple consumers in the same group
|
||||
// so exact ordering is not expected.
|
||||
consumerGroup2 := watcher.NewUnordered()
|
||||
|
||||
// Application logic that tracks messages from a topic.
|
||||
application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
|
||||
return func(ctx flow.Context, s common.Service) error {
|
||||
// Simulate periodic errors.
|
||||
sim := simulate.PeriodicError(ctx, 100)
|
||||
|
||||
// Setup the /orders event handler.
|
||||
return multierr.Combine(
|
||||
s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
|
||||
if err := sim(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Track/Observe the data of the event.
|
||||
watcher.Observe(string(in.Data))
|
||||
ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
|
||||
return in.Data, nil
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the partition key on all messages so they
|
||||
// are written to the same partition.
|
||||
// This allows for checking of ordered messages.
|
||||
metadata := map[string]string{
|
||||
messageKey: "test",
|
||||
}
|
||||
|
||||
// Test logic that sends messages to a topic and
|
||||
// verifies the application has received them.
|
||||
sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
|
||||
_, hasKey := metadata[messageKey]
|
||||
return func(ctx flow.Context) error {
|
||||
client := sidecar.GetClient(ctx, sidecarName1)
|
||||
|
||||
// Declare what is expected BEFORE performing any steps
|
||||
// that will satisfy the test.
|
||||
msgs := make([]string, numMessages)
|
||||
for i := range msgs {
|
||||
msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
|
||||
}
|
||||
for _, m := range watchers {
|
||||
m.ExpectStrings(msgs...)
|
||||
}
|
||||
// If no key it provided, create a random one.
|
||||
// For Kafka, this will spread messages across
|
||||
// the topic's partitions.
|
||||
if !hasKey {
|
||||
metadata[messageKey] = uuid.NewString()
|
||||
}
|
||||
|
||||
// Send events that the application above will observe.
|
||||
ctx.Log("Sending messages!")
|
||||
for _, msg := range msgs {
|
||||
ctx.Logf("Sending: %q", msg)
|
||||
err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
|
||||
Name: bindingName,
|
||||
Operation: string(bindings.CreateOperation),
|
||||
Data: []byte(msg),
|
||||
Metadata: metadata,
|
||||
})
|
||||
require.NoError(ctx, err, "error output binding message")
|
||||
}
|
||||
|
||||
// Do the messages we observed match what we expect?
|
||||
for _, m := range watchers {
|
||||
m.Assert(ctx, time.Minute)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// sendMessagesInBackground and assertMessages are
|
||||
// Runnables for testing publishing and consuming
|
||||
// messages reliably when infrastructure and network
|
||||
// interruptions occur.
|
||||
var task flow.AsyncTask
|
||||
sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
|
||||
return func(ctx flow.Context) error {
|
||||
client := sidecar.GetClient(ctx, sidecarName1)
|
||||
for _, m := range watchers {
|
||||
m.Reset()
|
||||
}
|
||||
|
||||
t := time.NewTicker(100 * time.Millisecond)
|
||||
defer t.Stop()
|
||||
|
||||
counter := 1
|
||||
for {
|
||||
select {
|
||||
case <-task.Done():
|
||||
return nil
|
||||
case <-t.C:
|
||||
msg := fmt.Sprintf("Background message - %03d", counter)
|
||||
for _, m := range watchers {
|
||||
m.Prepare(msg) // Track for observation
|
||||
}
|
||||
|
||||
// Publish with retries.
|
||||
bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
|
||||
if err := kit_retry.NotifyRecover(func() error {
|
||||
return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
|
||||
Name: bindingName,
|
||||
Operation: string(bindings.CreateOperation),
|
||||
Data: []byte(msg),
|
||||
Metadata: metadata,
|
||||
})
|
||||
}, bo, func(err error, t time.Duration) {
|
||||
ctx.Logf("Error invoking output binding, retrying in %s", t)
|
||||
}, func() {}); err == nil {
|
||||
for _, m := range watchers {
|
||||
m.Add(msg) // Success
|
||||
}
|
||||
counter++
|
||||
} else {
|
||||
for _, m := range watchers {
|
||||
m.Remove(msg) // Remove from Tracking
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
assertMessages := func(messages ...*watcher.Watcher) flow.Runnable {
|
||||
return func(ctx flow.Context) error {
|
||||
// Signal sendMessagesInBackground to stop and wait for it to complete.
|
||||
task.CancelAndWait()
|
||||
for _, m := range messages {
|
||||
m.Assert(ctx, 5*time.Minute)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
flow.New(t, "kafka certification with retry").
|
||||
// Run Kafka using Docker Compose.
|
||||
Step(dockercompose.Run(clusterName, dockerComposeYAML)).
|
||||
Step("wait for broker sockets",
|
||||
network.WaitForAddresses(5*time.Minute, brokers...)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
|
||||
config := sarama.NewConfig()
|
||||
config.ClientID = "test-consumer"
|
||||
config.Consumer.Return.Errors = true
|
||||
|
||||
// Create new consumer
|
||||
client, err := sarama.NewConsumer(brokers, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Ensure the brokers are ready by attempting to consume
|
||||
// a topic partition.
|
||||
_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
|
||||
|
||||
return err
|
||||
})).
|
||||
Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error {
|
||||
httpClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true, // test server certificate is not trusted.
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := httpClient.Get(oauthClientQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("oauth client query for 'dapr' not successful")
|
||||
}
|
||||
return nil
|
||||
})).
|
||||
|
||||
// Run the application logic above.
|
||||
Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
|
||||
application(appID1, consumerGroup1))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName1,
|
||||
embedded.WithComponentsPath("./components-retry/consumer1"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
//
|
||||
// Run the second application.
|
||||
Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
|
||||
application(appID2, consumerGroup2))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName2,
|
||||
embedded.WithComponentsPath("./components-retry/mtls-consumer"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
|
||||
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
//
|
||||
// Send messages using the same metadata/message key so we can expect
|
||||
// in-order processing.
|
||||
Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup2)).
|
||||
|
||||
// Run the third application.
|
||||
Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
|
||||
application(appID3, consumerGroup2))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName3,
|
||||
embedded.WithComponentsPath("./components-retry/oauth-consumer"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
|
||||
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
Step("reset", flow.Reset(consumerGroup2)).
|
||||
//
|
||||
// Send messages with random keys to test message consumption
|
||||
// across more than one consumer group and consumers per group.
|
||||
Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
|
||||
|
||||
// Gradually stop each broker.
|
||||
// This tests the component's ability to handle reconnections
// when brokers are shut down cleanly.
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
//
|
||||
// Errors will likely start occurring here since quorum is lost.
|
||||
Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
|
||||
Step("wait", flow.Sleep(10*time.Second)).
|
||||
//
|
||||
// Errors will definitely occur here.
|
||||
Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
|
||||
Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
|
||||
Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
|
||||
//
|
||||
// Component should recover at this point.
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Simulate a network interruption.
|
||||
// This tests the component's ability to handle reconnections
|
||||
// when Dapr is disconnected abnormally.
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
//
|
||||
// Errors will start occurring here.
|
||||
Step("interrupt network",
|
||||
network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
|
||||
//
|
||||
// Component should recover at this point.
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
|
||||
|
||||
// Reset and test that all messages are received during a
|
||||
// consumer rebalance.
|
||||
Step("reset", flow.Reset(consumerGroup2)).
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup2)).
|
||||
Step("wait", flow.Sleep(15*time.Second)).
|
||||
Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
|
||||
Step("wait", flow.Sleep(3*time.Second)).
|
||||
Step("stop app 2", app.Stop(appID2)).
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
|
||||
Run()
|
||||
}
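The handler above rejects every 100th delivery via simulate.PeriodicError so that the binding's retry path is exercised. As a rough illustration only (the real helper lives in flow/simulate and its signature and behavior may differ), such a simulator can be as small as the following sketch, which needs only "errors" and "sync/atomic":

// Sketch of a periodic error generator in the spirit of simulate.PeriodicError.
// The name, signature, and cadence are assumptions for illustration.
func periodicError(every int64) func() error {
	var calls int64
	return func() error {
		// Fail on every n-th invocation, succeed otherwise.
		if atomic.AddInt64(&calls, 1)%every == 0 {
			return errors.New("simulated periodic error")
		}
		return nil
	}
}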
@ -1,374 +1,360 @@
|
|||
/*
|
||||
Copyright 2021 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kafka_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/multierr"
|
||||
|
||||
// Pub/Sub.
|
||||
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
bindings_kafka "github.com/dapr/components-contrib/bindings/kafka"
|
||||
bindings_loader "github.com/dapr/dapr/pkg/components/bindings"
|
||||
|
||||
// Dapr runtime and Go-SDK
|
||||
"github.com/dapr/dapr/pkg/runtime"
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
"github.com/dapr/go-sdk/service/common"
|
||||
"github.com/dapr/kit/logger"
|
||||
kit_retry "github.com/dapr/kit/retry"
|
||||
|
||||
// Certification testing runnables
|
||||
"github.com/dapr/components-contrib/tests/certification/embedded"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/app"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/network"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/retry"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/watcher"
|
||||
)
|
||||
|
||||
const (
|
||||
sidecarName1 = "dapr-1"
|
||||
sidecarName2 = "dapr-2"
|
||||
sidecarName3 = "dapr-3"
|
||||
appID1 = "app-1"
|
||||
appID2 = "app-2"
|
||||
appID3 = "app-3"
|
||||
clusterName = "kafkacertification"
|
||||
dockerComposeYAML = "docker-compose.yml"
|
||||
numMessages = 1000
|
||||
appPort = 8000
|
||||
portOffset = 2
|
||||
messageKey = "partitionKey"
|
||||
|
||||
bindingName = "messagebus"
|
||||
topicName = "neworder"
|
||||
)
|
||||
|
||||
var (
|
||||
brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
|
||||
oauthClientQuery = "https://localhost:4444/clients/dapr"
|
||||
)
|
||||
|
||||
func TestKafka(t *testing.T) {
|
||||
// For Kafka, we should ensure messages are received in order.
|
||||
consumerGroup1 := watcher.NewOrdered()
|
||||
// This watcher is across multiple consumers in the same group
|
||||
// so exact ordering is not expected.
|
||||
consumerGroup2 := watcher.NewUnordered()
|
||||
|
||||
// Application logic that tracks messages from a topic.
|
||||
application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
|
||||
return func(ctx flow.Context, s common.Service) error {
|
||||
// Set up the /orders event handler.
|
||||
return multierr.Combine(
|
||||
s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
|
||||
// Track/Observe the data of the event.
|
||||
watcher.Observe(string(in.Data))
|
||||
ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
|
||||
return in.Data, nil
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the partition key on all messages so they
|
||||
// are written to the same partition.
|
||||
// This allows for checking of ordered messages.
|
||||
metadata := map[string]string{
|
||||
messageKey: "test",
|
||||
}
|
||||
|
||||
// Test logic that sends messages to a topic and
|
||||
// verifies the application has received them.
|
||||
sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
|
||||
_, hasKey := metadata[messageKey]
|
||||
return func(ctx flow.Context) error {
|
||||
client := sidecar.GetClient(ctx, sidecarName1)
|
||||
|
||||
// Declare what is expected BEFORE performing any steps
|
||||
// that will satisfy the test.
|
||||
msgs := make([]string, numMessages)
|
||||
for i := range msgs {
|
||||
msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
|
||||
}
|
||||
for _, m := range watchers {
|
||||
m.ExpectStrings(msgs...)
|
||||
}
|
||||
// If no key is provided, create a random one.
|
||||
// For Kafka, this will spread messages across
|
||||
// the topic's partitions.
|
||||
if !hasKey {
|
||||
metadata[messageKey] = uuid.NewString()
|
||||
}
|
||||
|
||||
// Send events that the application above will observe.
|
||||
ctx.Log("Sending messages!")
|
||||
for _, msg := range msgs {
|
||||
err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
|
||||
Name: bindingName,
|
||||
Operation: string(bindings.CreateOperation),
|
||||
Data: []byte(msg),
|
||||
Metadata: metadata,
|
||||
})
|
||||
require.NoError(ctx, err, "error publishing message")
|
||||
}
|
||||
|
||||
// Do the messages we observed match what we expect?
|
||||
for _, m := range watchers {
|
||||
m.Assert(ctx, time.Minute)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// sendMessagesInBackground and assertMessages are
|
||||
// Runnables for testing publishing and consuming
|
||||
// messages reliably when infrastructure and network
|
||||
// interruptions occur.
|
||||
var task flow.AsyncTask
|
||||
sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
|
||||
return func(ctx flow.Context) error {
|
||||
client := sidecar.GetClient(ctx, sidecarName1)
|
||||
for _, m := range watchers {
|
||||
m.Reset()
|
||||
}
|
||||
|
||||
t := time.NewTicker(100 * time.Millisecond)
|
||||
defer t.Stop()
|
||||
|
||||
counter := 1
|
||||
for {
|
||||
select {
|
||||
case <-task.Done():
|
||||
return nil
|
||||
case <-t.C:
|
||||
msg := fmt.Sprintf("Background message - %03d", counter)
|
||||
for _, m := range watchers {
|
||||
m.Prepare(msg) // Track for observation
|
||||
}
|
||||
|
||||
// Publish with retries.
|
||||
bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
|
||||
if err := kit_retry.NotifyRecover(func() error {
|
||||
return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
|
||||
Name: bindingName,
|
||||
Operation: string(bindings.CreateOperation),
|
||||
Data: []byte(msg),
|
||||
Metadata: metadata,
|
||||
})
|
||||
}, bo, func(err error, t time.Duration) {
|
||||
ctx.Logf("Error invoking output binding, retrying in %s", t)
|
||||
}, func() {}); err == nil {
|
||||
for _, m := range watchers {
|
||||
m.Add(msg) // Success
|
||||
}
|
||||
counter++
|
||||
} else {
|
||||
for _, m := range watchers {
|
||||
m.Remove(msg) // Remove from Tracking
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
assertMessages := func(watchers ...*watcher.Watcher) flow.Runnable {
|
||||
return func(ctx flow.Context) error {
|
||||
// Signal sendMessagesInBackground to stop and wait for it to complete.
|
||||
task.CancelAndWait()
|
||||
for _, m := range watchers {
|
||||
m.Assert(ctx, 5*time.Minute)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
flow.New(t, "kafka certification").
|
||||
// Run Kafka using Docker Compose.
|
||||
Step(dockercompose.Run(clusterName, dockerComposeYAML)).
|
||||
Step("wait for broker sockets",
|
||||
network.WaitForAddresses(5*time.Minute, brokers...)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
|
||||
config := sarama.NewConfig()
|
||||
config.ClientID = "test-consumer"
|
||||
config.Consumer.Return.Errors = true
|
||||
|
||||
// Create new consumer
|
||||
client, err := sarama.NewConsumer(brokers, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Ensure the brokers are ready by attempting to consume
|
||||
// a topic partition.
|
||||
_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
|
||||
|
||||
return err
|
||||
})).
|
||||
Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error {
|
||||
httpClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true, // test server certificate is not trusted.
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := httpClient.Get(oauthClientQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("oauth client query for 'dapr' not successful")
|
||||
}
|
||||
return nil
|
||||
})).
|
||||
//
|
||||
// Run the application logic above.
|
||||
Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
|
||||
application(appID1, consumerGroup1))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName1,
|
||||
embedded.WithComponentsPath("./components/consumer1"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
//
|
||||
// Run the second application.
|
||||
Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
|
||||
application(appID2, consumerGroup2))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName2,
|
||||
embedded.WithComponentsPath("./components/mtls-consumer"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
|
||||
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
//
|
||||
// Send messages using the same metadata/message key so we can expect
|
||||
// in-order processing.
|
||||
Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Run the third application.
|
||||
Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
|
||||
application(appID3, consumerGroup2))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName3,
|
||||
embedded.WithComponentsPath("./components/oauth-consumer"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
|
||||
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
Step("reset", flow.Reset(consumerGroup2)).
|
||||
//
|
||||
// Send messages with random keys to test message consumption
|
||||
// across more than one consumer group and consumers per group.
|
||||
Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
|
||||
//
|
||||
// Gradually stop each broker.
|
||||
// This tests the component's ability to handle reconnections
// when brokers are shut down cleanly.
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
//
|
||||
// Errors will likely start occurring here since quorum is lost.
|
||||
Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
|
||||
Step("wait", flow.Sleep(10*time.Second)).
|
||||
//
|
||||
// Errors will definitely occur here.
|
||||
Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
|
||||
Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
|
||||
Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
|
||||
//
|
||||
// Component should recover at this point.
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Simulate a network interruption.
|
||||
// This tests the component's ability to handle reconnections
|
||||
// when Dapr is disconnected abnormally.
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
//
|
||||
// Errors will start occurring here.
|
||||
Step("interrupt network",
|
||||
network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
|
||||
//
|
||||
// Component should recover at this point.
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Reset and test that all messages are received during a
|
||||
// consumer rebalance.
|
||||
Step("reset", flow.Reset(consumerGroup2)).
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup2)).
|
||||
Step("wait", flow.Sleep(15*time.Second)).
|
||||
Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
|
||||
Step("wait", flow.Sleep(3*time.Second)).
|
||||
Step("stop app 2", app.Stop(appID2)).
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
|
||||
Run()
|
||||
}
|
||||
|
||||
func componentRuntimeOptions() []runtime.Option {
|
||||
log := logger.NewLogger("dapr.components")
|
||||
|
||||
bindingsRegistry := bindings_loader.NewRegistry()
|
||||
bindingsRegistry.Logger = log
|
||||
bindingsRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
|
||||
return bindings_kafka.NewKafka(l)
|
||||
}, "kafka")
|
||||
bindingsRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
|
||||
return bindings_kafka.NewKafka(l)
|
||||
}, "kafka")
|
||||
|
||||
return []runtime.Option{
|
||||
runtime.WithBindings(bindingsRegistry),
|
||||
}
|
||||
}
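For reference, the publish-with-retries block inside sendMessagesInBackground above can be read as a small helper: invoke the output binding, and on failure retry on a constant one-second backoff until the surrounding context or task is cancelled. A sketch using the same calls as the test (the helper name and shape are illustrative only):

// Illustrative only: mirrors the kit_retry.NotifyRecover + backoff.WithContext
// usage in the tests above.
func publishWithRetry(ctx context.Context, client dapr.Client, req *dapr.InvokeBindingRequest) error {
	bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), ctx)
	return kit_retry.NotifyRecover(func() error {
		return client.InvokeOutputBinding(ctx, req)
	}, bo, func(err error, d time.Duration) {
		// notify: called before each retry with the error and the delay
	}, func() {})
}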
/*
|
||||
Copyright 2021 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kafka_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/multierr"
|
||||
|
||||
// Pub/Sub.
|
||||
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
bindings_kafka "github.com/dapr/components-contrib/bindings/kafka"
|
||||
bindings_loader "github.com/dapr/dapr/pkg/components/bindings"
|
||||
|
||||
// Dapr runtime and Go-SDK
|
||||
"github.com/dapr/dapr/pkg/runtime"
|
||||
dapr "github.com/dapr/go-sdk/client"
|
||||
"github.com/dapr/go-sdk/service/common"
|
||||
"github.com/dapr/kit/logger"
|
||||
kit_retry "github.com/dapr/kit/retry"
|
||||
|
||||
// Certification testing runnables
|
||||
"github.com/dapr/components-contrib/tests/certification/embedded"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/app"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/network"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/retry"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/simulate"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/watcher"
|
||||
)
|
||||
|
||||
const (
|
||||
sidecarName1 = "dapr-1"
|
||||
sidecarName2 = "dapr-2"
|
||||
sidecarName3 = "dapr-3"
|
||||
appID1 = "app-1"
|
||||
appID2 = "app-2"
|
||||
appID3 = "app-3"
|
||||
clusterName = "kafkacertification"
|
||||
dockerComposeYAML = "docker-compose.yml"
|
||||
numMessages = 1000
|
||||
appPort = 8000
|
||||
portOffset = 2
|
||||
messageKey = "partitionKey"
|
||||
|
||||
bindingName = "messagebus"
|
||||
topicName = "neworder"
|
||||
)
|
||||
|
||||
var (
|
||||
brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
|
||||
)
|
||||
|
||||
func TestKafka_with_retry(t *testing.T) {
|
||||
// For Kafka, we should ensure messages are received in order.
|
||||
consumerGroup1 := watcher.NewOrdered()
|
||||
// This watcher is across multiple consumers in the same group
|
||||
// so exact ordering is not expected.
|
||||
consumerGroup2 := watcher.NewUnordered()
|
||||
|
||||
// Application logic that tracks messages from a topic.
|
||||
application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
|
||||
return func(ctx flow.Context, s common.Service) error {
|
||||
// Simulate periodic errors.
|
||||
sim := simulate.PeriodicError(ctx, 100)
|
||||
|
||||
// Set up the /orders event handler.
|
||||
return multierr.Combine(
|
||||
s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
|
||||
if err := sim(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Track/Observe the data of the event.
|
||||
watcher.Observe(string(in.Data))
|
||||
ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
|
||||
return in.Data, nil
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the partition key on all messages so they
|
||||
// are written to the same partition.
|
||||
// This allows for checking of ordered messages.
|
||||
metadata := map[string]string{
|
||||
messageKey: "test",
|
||||
}
|
||||
|
||||
// Test logic that sends messages to a topic and
|
||||
// verifies the application has received them.
|
||||
sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
|
||||
_, hasKey := metadata[messageKey]
|
||||
return func(ctx flow.Context) error {
|
||||
client := sidecar.GetClient(ctx, sidecarName1)
|
||||
|
||||
// Declare what is expected BEFORE performing any steps
|
||||
// that will satisfy the test.
|
||||
msgs := make([]string, numMessages)
|
||||
for i := range msgs {
|
||||
msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
|
||||
}
|
||||
for _, m := range watchers {
|
||||
m.ExpectStrings(msgs...)
|
||||
}
|
||||
// If no key is provided, create a random one.
|
||||
// For Kafka, this will spread messages across
|
||||
// the topic's partitions.
|
||||
if !hasKey {
|
||||
metadata[messageKey] = uuid.NewString()
|
||||
}
|
||||
|
||||
// Send events that the application above will observe.
|
||||
ctx.Log("Sending messages!")
|
||||
for _, msg := range msgs {
|
||||
ctx.Logf("Sending: %q", msg)
|
||||
err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
|
||||
Name: bindingName,
|
||||
Operation: string(bindings.CreateOperation),
|
||||
Data: []byte(msg),
|
||||
Metadata: metadata,
|
||||
})
|
||||
require.NoError(ctx, err, "error publishing message")
|
||||
}
|
||||
|
||||
// Do the messages we observed match what we expect?
|
||||
for _, m := range watchers {
|
||||
m.Assert(ctx, time.Minute)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// sendMessagesInBackground and assertMessages are
|
||||
// Runnables for testing publishing and consuming
|
||||
// messages reliably when infrastructure and network
|
||||
// interruptions occur.
|
||||
var task flow.AsyncTask
|
||||
sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
|
||||
return func(ctx flow.Context) error {
|
||||
client := sidecar.GetClient(ctx, sidecarName1)
|
||||
for _, m := range watchers {
|
||||
m.Reset()
|
||||
}
|
||||
|
||||
t := time.NewTicker(100 * time.Millisecond)
|
||||
defer t.Stop()
|
||||
|
||||
counter := 1
|
||||
for {
|
||||
select {
|
||||
case <-task.Done():
|
||||
return nil
|
||||
case <-t.C:
|
||||
msg := fmt.Sprintf("Background message - %03d", counter)
|
||||
for _, m := range watchers {
|
||||
m.Prepare(msg) // Track for observation
|
||||
}
|
||||
|
||||
// Publish with retries.
|
||||
bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
|
||||
if err := kit_retry.NotifyRecover(func() error {
|
||||
return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
|
||||
Name: bindingName,
|
||||
Operation: string(bindings.CreateOperation),
|
||||
Data: []byte(msg),
|
||||
Metadata: metadata,
|
||||
})
|
||||
}, bo, func(err error, t time.Duration) {
|
||||
ctx.Logf("Error invoking output binding, retrying in %s", t)
|
||||
}, func() {}); err == nil {
|
||||
for _, m := range watchers {
|
||||
m.Add(msg) // Success
|
||||
}
|
||||
counter++
|
||||
} else {
|
||||
for _, m := range watchers {
|
||||
m.Remove(msg) // Remove from Tracking
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
assertMessages := func(watchers ...*watcher.Watcher) flow.Runnable {
|
||||
return func(ctx flow.Context) error {
|
||||
// Signal sendMessagesInBackground to stop and wait for it to complete.
|
||||
task.CancelAndWait()
|
||||
for _, m := range watchers {
|
||||
m.Assert(ctx, 5*time.Minute)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
flow.New(t, "kafka certification with retry").
|
||||
// Run Kafka using Docker Compose.
|
||||
Step(dockercompose.Run(clusterName, dockerComposeYAML)).
|
||||
Step("wait for broker sockets",
|
||||
network.WaitForAddresses(5*time.Minute, brokers...)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
|
||||
config := sarama.NewConfig()
|
||||
config.ClientID = "test-consumer"
|
||||
config.Consumer.Return.Errors = true
|
||||
|
||||
// Create new consumer
|
||||
client, err := sarama.NewConsumer(brokers, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Ensure the brokers are ready by attempting to consume
|
||||
// a topic partition.
|
||||
_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
|
||||
|
||||
return err
|
||||
})).
|
||||
// Run the application logic above.
|
||||
Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
|
||||
application(appID1, consumerGroup1))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName1,
|
||||
embedded.WithComponentsPath("./components/consumer1"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
//
|
||||
// Run the second application.
|
||||
Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
|
||||
application(appID2, consumerGroup2))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName2,
|
||||
embedded.WithComponentsPath("./components/consumer2"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
|
||||
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
//
|
||||
// Send messages using the same metadata/message key so we can expect
|
||||
// in-order processing.
|
||||
Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Run the third application.
|
||||
Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
|
||||
application(appID3, consumerGroup2))).
|
||||
//
|
||||
// Run the Dapr sidecar with the Kafka component.
|
||||
Step(sidecar.Run(sidecarName3,
|
||||
embedded.WithComponentsPath("./components/consumer2"),
|
||||
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
|
||||
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
|
||||
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
|
||||
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
|
||||
componentRuntimeOptions(),
|
||||
)).
|
||||
Step("reset", flow.Reset(consumerGroup2)).
|
||||
//
|
||||
// Send messages with random keys to test message consumption
|
||||
// across more than one consumer group and consumers per group.
|
||||
Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
|
||||
//
|
||||
// Gradually stop each broker.
|
||||
// This tests the component's ability to handle reconnections
// when brokers are shut down cleanly.
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
//
|
||||
// Errors will likely start occurring here since quorum is lost.
|
||||
Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
|
||||
Step("wait", flow.Sleep(10*time.Second)).
|
||||
//
|
||||
// Errors will definitely occur here.
|
||||
Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
|
||||
Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
|
||||
Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
|
||||
//
|
||||
// Component should recover at this point.
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Simulate a network interruption.
|
||||
// This tests the component's ability to handle reconnections
|
||||
// when Dapr is disconnected abnormally.
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
|
||||
Step("wait", flow.Sleep(5*time.Second)).
|
||||
//
|
||||
// Errors will start occurring here.
|
||||
Step("interrupt network",
|
||||
network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
|
||||
//
|
||||
// Component should recover at this point.
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
|
||||
//
|
||||
// Reset and test that all messages are received during a
|
||||
// consumer rebalance.
|
||||
Step("reset", flow.Reset(consumerGroup2)).
|
||||
StepAsync("steady flow of messages to publish", &task,
|
||||
sendMessagesInBackground(consumerGroup2)).
|
||||
Step("wait", flow.Sleep(15*time.Second)).
|
||||
Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
|
||||
Step("wait", flow.Sleep(3*time.Second)).
|
||||
Step("stop app 2", app.Stop(appID2)).
|
||||
Step("wait", flow.Sleep(30*time.Second)).
|
||||
Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
|
||||
Run()
|
||||
}
|
||||
|
||||
func componentRuntimeOptions() []runtime.Option {
|
||||
log := logger.NewLogger("dapr.components")
|
||||
|
||||
bindingsRegistry := bindings_loader.NewRegistry()
|
||||
bindingsRegistry.Logger = log
|
||||
bindingsRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
|
||||
return bindings_kafka.NewKafka(l)
|
||||
}, "kafka")
|
||||
bindingsRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
|
||||
return bindings_kafka.NewKafka(l)
|
||||
}, "kafka")
|
||||
|
||||
return []runtime.Option{
|
||||
runtime.WithBindings(bindingsRegistry),
|
||||
}
|
||||
}
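Finally, the watcher objects that drive all of the assertions above follow an expect/observe/assert lifecycle. A minimal usage sketch, using only the methods called in these tests (exact semantics are assumptions):

// Illustrative only: the watcher lifecycle as used by the tests above.
func watcherLifecycleSketch(ctx flow.Context) {
	w := watcher.NewOrdered() // watcher.NewUnordered() when ordering across a group is not expected
	w.ExpectStrings("msg-001", "msg-002") // declare expectations before sending
	w.Observe("msg-001")                  // normally called from the app's binding handler
	w.Observe("msg-002")
	w.Assert(ctx, time.Minute) // compares observed vs expected within the timeout
}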
@ -1,22 +0,0 @@
|
|||
serve:
|
||||
admin:
|
||||
host: 0.0.0.0
|
||||
port: 4444
|
||||
public:
|
||||
host: 0.0.0.0
|
||||
port: 4443
|
||||
tls:
|
||||
cert:
|
||||
path: /config/tls/hydra.crt
|
||||
key:
|
||||
path: /config/tls/hydra.key
|
||||
dsn: memory
|
||||
log:
|
||||
leak_sensitive_values: true
|
||||
level: debug
|
||||
urls:
|
||||
self:
|
||||
issuer: https://hydra:4443
|
||||
strategies:
|
||||
access_token: opaque
|
||||
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDejCCAmKgAwIBAgIUIgMF15XiDisW+e4I+clKWYvxcfMwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjEwMDA1ODAwWhcNMjIxMjEwMDA1ODAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQDICbhBmpxFPFtoRTjdiki2ouZQbUoHE4llIQnJz3ta/+gWi/czrOmC3aHz
|
||||
x9pJ1kifBG5MlbdnH8WCQXx/vPXP5hpTmTDjAp87Fygk2KWdb/bQBrpRTIEgAuK3
|
||||
IWJ9tYhcDDxSwEF52xNnRkklxZpVRZX1SmcdndEqioaAnxWEM1x+JJcjrk6Ud4dv
|
||||
aX0G1xw8g6u0KT1I61Aja2OAAj+iPih6RK6xSRdxvELXbehClBHOpJP6sRw03Xw4
|
||||
HRJEesWqrGAFEp0qSZulKwn2MHAW80VVF/U9hogUQrBVFTKw/5oS9eu+BV2AY3Rh
|
||||
8DACB0blpEkjIachjjo2A8wuhBeNAgMBAAGjgZ0wgZowDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBRVxfGJ7a+7DBz2PM2w/U5aeJFOfjAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzAbBgNVHREEFDASggVoeWRyYYIJbG9jYWxob3N0MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQA+0zkBNBZ8okLiEl9B4nbfBvQXdkYOl9H9TdDYlWLNKb1S
|
||||
8Y4SNQ4hrfKspYVIBVvWfuwnphdLeexs4ovU6OkXeVPFPSsjihX9I+sJ3bFCLvkj
|
||||
lVXY/pJy/Z6QQlPg71LkCiH0Hv2RIvGZ1UtTu12d8BiF3oO8Nnzq4kiyfpPJ5QDR
|
||||
GsTKmXxEzgCcR+DI4g05hI2BQuq8Xjw4jZzt0IOcWhR2ZxBwfzLQp/hAQK69iPCN
|
||||
3DfD/eMr1EF8kAWec4eo3CFwHvrPpEdIMeNE7q9fuyfVPJGQZFKNHl7rF4YqYn/F
|
||||
4XGJxRCjd860JkJDLrmXazED6cLE1IvYPCLUsfK8
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAyAm4QZqcRTxbaEU43YpItqLmUG1KBxOJZSEJyc97Wv/oFov3
|
||||
M6zpgt2h88faSdZInwRuTJW3Zx/FgkF8f7z1z+YaU5kw4wKfOxcoJNilnW/20Aa6
|
||||
UUyBIALityFifbWIXAw8UsBBedsTZ0ZJJcWaVUWV9UpnHZ3RKoqGgJ8VhDNcfiSX
|
||||
I65OlHeHb2l9BtccPIOrtCk9SOtQI2tjgAI/oj4oekSusUkXcbxC123oQpQRzqST
|
||||
+rEcNN18OB0SRHrFqqxgBRKdKkmbpSsJ9jBwFvNFVRf1PYaIFEKwVRUysP+aEvXr
|
||||
vgVdgGN0YfAwAgdG5aRJIyGnIY46NgPMLoQXjQIDAQABAoIBAQDEErLmqxOt0aGP
|
||||
LPq2PEtVqYqzHszG7uFnnOCpTZQN+HSXVQ4zOrOQMIoEF8rhQQbhx0gODVo93KiO
|
||||
Kn5L/v26kEMR2kBO400McIBKzYhYL1zvPwj1k1Wl+O4crr6JlZxZDS07t3L2bEQy
|
||||
oHQmb+/80T5RtmIoZ36Ugj+gZ06BytKPY2yZRpLnF/p9V77JK2BT2pg1EXahU5LL
|
||||
wGhodg+MqFrKPk0TpdQ7edipHEiqprk/sEH9KA4cPfa83LBv6xRcHYBzlA0mHnZo
|
||||
jgGdptDAFJeJcMLwywF1CvI/x5Y0mAkDN95uFcw8/ozX2pKGuIZYY9BjR444zKm2
|
||||
8V7Br2gBAoGBAN2n2BlBXTjOgZ7c50fGFA+oR23C90r3AHwnh1FOnCzKOUNbW48F
|
||||
tsKvmI0DUK+sg+ZkGIEz1ll81FVzCAZQ8sii3LV5qnW7QVhZszHbKWtI9ulcFDqe
|
||||
ZqKlOahy5GmcGfxbniufrHaBlP+Y1gwJd8NXjoFKNxLLtQ8S25e4QwKNAoGBAOcI
|
||||
ZH+eaZ3653fFPzuJtsbbfqB5HW6bTLIUqnwNRGghvMP0JTLzYYVlcaLMrI2L50Qf
|
||||
Z5IEl7+uVeTmRehkoe5J3r5tIifKrVGnQM7inpTfkCOlY2tsAL8/XvQ/6ikBEt2J
|
||||
r166mOk3RfjuuXuBFrPwfpZ5fMggFa92e5ukWqkBAoGAQ12VsedJu9AXWP7uU8QB
|
||||
qNiODO/qVKBJR3KED9QCZyJ20N/dLdSgvP69MG5HgXy/AbB+OhZVGRF1Pxsc3z6O
|
||||
6yeESKtXgTyOGZn5ejePmQmt8TKI+1/U9a2dnnJ8tRQ6WZZGth9rPQEZFa2PsEzY
|
||||
V0gvCWBS6KV8u74Re0UHKKkCgYB9j8Ae49d+9rgKDfd5wjTGCtDdIjXuwRSDzFuD
|
||||
pCpDdeKDlRMKh9++gg2qbxZwr1J3YaIGZ9yZXoRsLQJddSPUv+0BDYr8mVhtAjtk
|
||||
tSF+w6ow1VgdL8uQJT7T/FClDGJWaNgY4cztIw8yZXwFNXlDPjduTISWt2lRvVEc
|
||||
m8xyAQKBgF+aAk2qJ8/MM4aXoWgjkWiDGvgfVmWsYMpalz34PDP+hzPg3LxaGKsn
|
||||
jm+LQs9Z/WX26hxZK0HWQbcCsJ81mBvgeXnUrY/T50Zvd7zUFF+1WG7Is9KUlLA1
|
||||
ceQzJcixurQtuUSkwj2PfVziiufkHk43tuzDQ57carUX6kg2OwAD
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
|
||||
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
|
||||
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
|
||||
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
|
||||
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
|
||||
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
|
||||
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
|
||||
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
|
||||
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
|
||||
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
|
||||
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
|
||||
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.crt
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.key
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.p12
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.password
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.crt
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.key
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.password
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.crt
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.key
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.p12
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.password
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
|
||||
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
|
||||
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
|
||||
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
|
||||
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
|
||||
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
|
||||
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
|
||||
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
|
||||
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
|
||||
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
|
||||
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
|
||||
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
|
||||
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
|
||||
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
|
||||
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
|
||||
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
|
||||
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
|
||||
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
|
||||
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
|
||||
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
|
||||
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
|
||||
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
|
||||
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
|
||||
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
|
||||
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
|
||||
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
|
||||
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
|
||||
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
|
||||
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
|
||||
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
|
||||
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
|
||||
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
|
||||
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
|
||||
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
|
||||
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
|
||||
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
Binary file not shown.
|
|
@ -1 +0,0 @@
|
|||
dapr-test
|
||||
|
|
@ -1 +0,0 @@
|
|||
kafka.p12
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
Binary file not shown.
|
|
@ -1 +0,0 @@
|
|||
dapr-test
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
Binary file not shown.
|
|
@ -1 +0,0 @@
|
|||
dapr-test
|
||||
|
|
@ -1 +0,0 @@
|
|||
PLAIN_9092_1://localhost MTLS_9094_1://localhost OAUTH_9093_1://localhost
|
||||
|
|
@ -1 +0,0 @@
|
|||
PLAIN_9092_1://19092 MTLS_9094_1://19094 OAUTH_9093_1://19093
|
||||
|
|
@ -1 +0,0 @@
|
|||
PLAIN_9092 MTLS_9094 OAUTH_9093
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n
|
||||
kafka.root.logger.level=DEBUG
|
||||
log4j.rootLogger=${kafka.root.logger.level}, CONSOLE
|
||||
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
|
||||
log4j.logger.org.apache.zookeeper=INFO
|
||||
log4j.logger.kafka=INFO
|
||||
log4j.logger.org.apache.kafka=INFO
|
||||
log4j.logger.kafka.request.logger=WARN, CONSOLE
|
||||
log4j.logger.kafka.network.Processor=OFF
|
||||
log4j.logger.kafka.server.KafkaApis=OFF
|
||||
log4j.logger.kafka.network.RequestChannel$=WARN
|
||||
log4j.logger.kafka.controller=TRACE
|
||||
log4j.logger.kafka.log.LogCleaner=INFO
|
||||
log4j.logger.state.change.logger=TRACE
|
||||
log4j.logger.kafka.authorizer.logger=INFO
|
||||
|
||||
|
|
@ -1,101 +0,0 @@
|
|||
##############################
|
||||
##############################
|
||||
# This file is automatically generated by the Strimzi Cluster Operator
|
||||
# Any changes to this file will be ignored and overwritten!
|
||||
##############################
|
||||
##############################
|
||||
|
||||
##########
|
||||
# Broker ID
|
||||
##########
|
||||
broker.id=${STRIMZI_BROKER_ID}
|
||||
|
||||
##########
|
||||
# Zookeeper
|
||||
##########
|
||||
zookeeper.connect=zookeeper:2181
|
||||
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
|
||||
zookeeper.ssl.client.enable=false
|
||||
|
||||
##########
|
||||
# Kafka message logs configuration
|
||||
##########
|
||||
log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}
|
||||
|
||||
##########
|
||||
# Control Plane listener
|
||||
##########
|
||||
listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.controlplane-9090.ssl.keystore.type=PKCS12
|
||||
listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
|
||||
listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.controlplane-9090.ssl.truststore.type=PKCS12
|
||||
listener.name.controlplane-9090.ssl.client.auth=required
|
||||
|
||||
##########
|
||||
# Replication listener
|
||||
##########
|
||||
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.replication-9091.ssl.keystore.type=PKCS12
|
||||
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
|
||||
listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.replication-9091.ssl.truststore.type=PKCS12
|
||||
listener.name.replication-9091.ssl.client.auth=required
|
||||
|
||||
##########
|
||||
# Listener configuration: MTLS-9094
|
||||
##########
|
||||
listener.name.mtls-9094.ssl.client.auth=required
|
||||
listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
|
||||
listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.mtls-9094.ssl.truststore.type=PKCS12
|
||||
|
||||
listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.mtls-9094.ssl.keystore.type=PKCS12
|
||||
|
||||
|
||||
##########
|
||||
# Listener configuration: OAUTH-9093
|
||||
##########
|
||||
listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
|
||||
listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12";
|
||||
listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER
|
||||
listener.name.oauth-9093.connections.max.reauth.ms=1800000
|
||||
|
||||
listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12
|
||||
listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.oauth-9093.ssl.keystore.type=PKCS12
|
||||
|
||||
|
||||
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
|
||||
|
||||
##########
|
||||
# Common listener configuration
|
||||
##########
|
||||
listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:19094,OAUTH-9093://0.0.0.0:19093,PLAIN-9092://0.0.0.0:19092
|
||||
advertised.listeners=CONTROLPLANE-9090://kafka-1:9090,REPLICATION-9091://kafka-1:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT}
|
||||
listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT
|
||||
inter.broker.listener.name=REPLICATION-9091
|
||||
sasl.enabled.mechanisms=
|
||||
ssl.secure.random.implementation=SHA1PRNG
|
||||
ssl.endpoint.identification.algorithm=HTTPS
|
||||
|
||||
##########
|
||||
# Authorization
|
||||
##########
|
||||
allow.everyone.if.no.acl.found=true
|
||||
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
|
||||
super.users=User:CN=dapr,O=Dapr Test
|
||||
|
||||
##########
|
||||
# User provided configuration
|
||||
##########
|
||||
num.partitions=10
|
||||
auto.create.topics.enable=true
|
||||
group.initial.rebalance.delay.ms=0
|
||||
offsets.topic.replication.factor=3
|
||||
inter.broker.protocol.version=3.0
|
||||
log.message.format.version=3.0
|
||||
|
|
@ -1 +0,0 @@
|
|||
PLAIN_9092_2://localhost MTLS_9094_2://localhost OAUTH_9093_2://localhost
|
||||
|
|
@ -1 +0,0 @@
|
|||
PLAIN_9092_2://29092 MTLS_9094_2://29094 OAUTH_9093_2://29093
|
||||
|
|
@ -1 +0,0 @@
|
|||
PLAIN_9092 MTLS_9094 OAUTH_9093
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n
|
||||
kafka.root.logger.level=INFO
|
||||
log4j.rootLogger=${kafka.root.logger.level}, CONSOLE
|
||||
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
|
||||
log4j.logger.org.apache.zookeeper=INFO
|
||||
log4j.logger.kafka=INFO
|
||||
log4j.logger.org.apache.kafka=INFO
|
||||
log4j.logger.kafka.request.logger=WARN, CONSOLE
|
||||
log4j.logger.kafka.network.Processor=OFF
|
||||
log4j.logger.kafka.server.KafkaApis=OFF
|
||||
log4j.logger.kafka.network.RequestChannel$=WARN
|
||||
log4j.logger.kafka.controller=TRACE
|
||||
log4j.logger.kafka.log.LogCleaner=INFO
|
||||
log4j.logger.state.change.logger=TRACE
|
||||
log4j.logger.kafka.authorizer.logger=INFO
|
||||
|
||||
|
|
@ -1,101 +0,0 @@
|
|||
##############################
|
||||
##############################
|
||||
# This file is automatically generated by the Strimzi Cluster Operator
|
||||
# Any changes to this file will be ignored and overwritten!
|
||||
##############################
|
||||
##############################
|
||||
|
||||
##########
|
||||
# Broker ID
|
||||
##########
|
||||
broker.id=${STRIMZI_BROKER_ID}
|
||||
|
||||
##########
|
||||
# Zookeeper
|
||||
##########
|
||||
zookeeper.connect=zookeeper:2181
|
||||
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
|
||||
zookeeper.ssl.client.enable=false
|
||||
|
||||
##########
|
||||
# Kafka message logs configuration
|
||||
##########
|
||||
log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}
|
||||
|
||||
##########
|
||||
# Control Plane listener
|
||||
##########
|
||||
listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.controlplane-9090.ssl.keystore.type=PKCS12
|
||||
listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
|
||||
listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.controlplane-9090.ssl.truststore.type=PKCS12
|
||||
listener.name.controlplane-9090.ssl.client.auth=required
|
||||
|
||||
##########
|
||||
# Replication listener
|
||||
##########
|
||||
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.replication-9091.ssl.keystore.type=PKCS12
|
||||
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
|
||||
listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.replication-9091.ssl.truststore.type=PKCS12
|
||||
listener.name.replication-9091.ssl.client.auth=required
|
||||
|
||||
##########
|
||||
# Listener configuration: MTLS-9094
|
||||
##########
|
||||
listener.name.mtls-9094.ssl.client.auth=required
|
||||
listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
|
||||
listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.mtls-9094.ssl.truststore.type=PKCS12
|
||||
|
||||
listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.mtls-9094.ssl.keystore.type=PKCS12
|
||||
|
||||
|
||||
##########
|
||||
# Listener configuration: OAUTH-9093
|
||||
##########
|
||||
listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
|
||||
listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12";
|
||||
listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER
|
||||
listener.name.oauth-9093.connections.max.reauth.ms=1800000
|
||||
|
||||
listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12
|
||||
listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.oauth-9093.ssl.keystore.type=PKCS12
|
||||
|
||||
|
||||
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
|
||||
|
||||
##########
|
||||
# Common listener configuration
|
||||
##########
|
||||
listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:29094,OAUTH-9093://0.0.0.0:29093,PLAIN-9092://0.0.0.0:29092
|
||||
advertised.listeners=CONTROLPLANE-9090://kafka-2:9090,REPLICATION-9091://kafka-2:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT}
|
||||
listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT
|
||||
inter.broker.listener.name=REPLICATION-9091
|
||||
sasl.enabled.mechanisms=
|
||||
ssl.secure.random.implementation=SHA1PRNG
|
||||
ssl.endpoint.identification.algorithm=HTTPS
|
||||
|
||||
##########
|
||||
# Authorization
|
||||
##########
|
||||
allow.everyone.if.no.acl.found=true
|
||||
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
|
||||
super.users=User:CN=dapr,O=Dapr Test
|
||||
|
||||
##########
|
||||
# User provided configuration
|
||||
##########
|
||||
num.partitions=10
|
||||
auto.create.topics.enable=true
|
||||
group.initial.rebalance.delay.ms=0
|
||||
offsets.topic.replication.factor=3
|
||||
inter.broker.protocol.version=3.0
|
||||
log.message.format.version=3.0
|
||||
|
|
@ -1 +0,0 @@
PLAIN_9092_3://localhost MTLS_9094_3://localhost OAUTH_9093_3://localhost
@ -1 +0,0 @@
PLAIN_9092_3://39092 MTLS_9094_3://39094 OAUTH_9093_3://39093
@ -1 +0,0 @@
PLAIN_9092 MTLS_9094 OAUTH_9093
@ -1,18 +0,0 @@
|
|||
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n
|
||||
kafka.root.logger.level=INFO
|
||||
log4j.rootLogger=${kafka.root.logger.level}, CONSOLE
|
||||
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
|
||||
log4j.logger.org.apache.zookeeper=INFO
|
||||
log4j.logger.kafka=INFO
|
||||
log4j.logger.org.apache.kafka=INFO
|
||||
log4j.logger.kafka.request.logger=WARN, CONSOLE
|
||||
log4j.logger.kafka.network.Processor=OFF
|
||||
log4j.logger.kafka.server.KafkaApis=OFF
|
||||
log4j.logger.kafka.network.RequestChannel$=WARN
|
||||
log4j.logger.kafka.controller=TRACE
|
||||
log4j.logger.kafka.log.LogCleaner=INFO
|
||||
log4j.logger.state.change.logger=TRACE
|
||||
log4j.logger.kafka.authorizer.logger=INFO
|
||||
|
||||
|
|
@ -1,101 +0,0 @@
|
|||
##############################
|
||||
##############################
|
||||
# This file is automatically generated by the Strimzi Cluster Operator
|
||||
# Any changes to this file will be ignored and overwritten!
|
||||
##############################
|
||||
##############################
|
||||
|
||||
##########
|
||||
# Broker ID
|
||||
##########
|
||||
broker.id=${STRIMZI_BROKER_ID}
|
||||
|
||||
##########
|
||||
# Zookeeper
|
||||
##########
|
||||
zookeeper.connect=zookeeper:2181
|
||||
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
|
||||
zookeeper.ssl.client.enable=false
|
||||
|
||||
##########
|
||||
# Kafka message logs configuration
|
||||
##########
|
||||
log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}
|
||||
|
||||
##########
|
||||
# Control Plane listener
|
||||
##########
|
||||
listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.controlplane-9090.ssl.keystore.type=PKCS12
|
||||
listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
|
||||
listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.controlplane-9090.ssl.truststore.type=PKCS12
|
||||
listener.name.controlplane-9090.ssl.client.auth=required
|
||||
|
||||
##########
|
||||
# Replication listener
|
||||
##########
|
||||
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.replication-9091.ssl.keystore.type=PKCS12
|
||||
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
|
||||
listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.replication-9091.ssl.truststore.type=PKCS12
|
||||
listener.name.replication-9091.ssl.client.auth=required
|
||||
|
||||
##########
|
||||
# Listener configuration: MTLS-9094
|
||||
##########
|
||||
listener.name.mtls-9094.ssl.client.auth=required
|
||||
listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
|
||||
listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.mtls-9094.ssl.truststore.type=PKCS12
|
||||
|
||||
listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
|
||||
listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.mtls-9094.ssl.keystore.type=PKCS12
|
||||
|
||||
|
||||
##########
|
||||
# Listener configuration: OAUTH-9093
|
||||
##########
|
||||
listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
|
||||
listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12";
|
||||
listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER
|
||||
listener.name.oauth-9093.connections.max.reauth.ms=1800000
|
||||
|
||||
listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12
|
||||
listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD}
|
||||
listener.name.oauth-9093.ssl.keystore.type=PKCS12
|
||||
|
||||
|
||||
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
|
||||
|
||||
##########
|
||||
# Common listener configuration
|
||||
##########
|
||||
listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:39094,OAUTH-9093://0.0.0.0:39093,PLAIN-9092://0.0.0.0:39092
|
||||
advertised.listeners=CONTROLPLANE-9090://kafka-3:9090,REPLICATION-9091://kafka-3:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT}
|
||||
listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT
|
||||
inter.broker.listener.name=REPLICATION-9091
|
||||
sasl.enabled.mechanisms=
|
||||
ssl.secure.random.implementation=SHA1PRNG
|
||||
ssl.endpoint.identification.algorithm=HTTPS
|
||||
|
||||
##########
|
||||
# Authorization
|
||||
##########
|
||||
allow.everyone.if.no.acl.found=true
|
||||
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
|
||||
super.users=User:CN=dapr,O=Dapr Test
|
||||
|
||||
##########
|
||||
# User provided configuration
|
||||
##########
|
||||
num.partitions=10
|
||||
auto.create.topics.enable=true
|
||||
group.initial.rebalance.delay.ms=0
|
||||
offsets.topic.replication.factor=3
|
||||
inter.broker.protocol.version=3.0
|
||||
log.message.format.version=3.0
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDmzCCAoOgAwIBAgIUbM8Fssal+HxhavPplrJ1o4Fk/6kwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYzMjAwWhcNMjIxMjA0MTYzMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC7jpeZVmiNi1I91j6Z7Z5W8z3MCuquNjConG2NjxyT7klQYFMAMlQ0j5v9
|
||||
x5hUQ6ks4JTmCBaI/gPtjDJypCPwQKtr9QIECWjM1tSSOs/lu5p9Fqd30klcivF9
|
||||
fEpuyui6KpRVobGdg8bZ27Mh4yee1fI1DhAj5ME6Ti3sLmA5uxRYLLPollNICgUs
|
||||
QME2iJrm30rUmSqbKpB721ULcB7kLTn3PPqMDU3qmXLTTlioN3+hXuC0aSS5c/6f
|
||||
IwHQ/l2bLApCF9rLc+bkSFBBMOEZD/iomaE7JolHGUt7vEhObSxgnJ6ZH0C+k0Y/
|
||||
RLdG9cmmrdIP6SHy8UYX4O0UsHxPAgMBAAGjgb4wgbswDgYDVR0PAQH/BAQDAgWg
|
||||
MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFLpv
|
||||
SEQ21Za3JzU4rosvyiFHfM5VMB8GA1UdIwQYMBaAFH6X+clU0D49SZ9ezWRglsF8
|
||||
3glvMEYGA1UdEQQ/MD2CBGRhcHKCCWxvY2FsaG9zdIIHa2Fma2EtMYIHa2Fma2Et
|
||||
MoIHa2Fma2EtM4IPa2FmYWstYm9vdHN0cmFwMA0GCSqGSIb3DQEBCwUAA4IBAQC1
|
||||
pNBOCYLN0CA3VZO+Pz+b7XnAUAZFisgH2/eBQBPOU9b4ImCLIASBqAMEJ4uq//MW
|
||||
IiF9iu1YcncXPP/8rPnEsxKLBVcjWH48PneBcW3qb/9he+xXCnB7fqMUDkggzTeH
|
||||
4ouZyY5/GnlQYgmFNgOIyI4pydoD8GkJQh88LzEn/YAKi0aVkwBMJ2eb2Fiz05WW
|
||||
TKqWZKNnOjLPz5fIYNCR+uZtuqADhqItyaBa+X9NVIQ9cPcPMohZS4o+DtrCQevf
|
||||
6QZuQEYh3IIY8Smi4+2OKUE0Gy2AnEKaEdwxbrCKYhuF/sUrLm76LmIH75HQJxyM
|
||||
zE20cNgzX3yurenT3tbN
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAu46XmVZojYtSPdY+me2eVvM9zArqrjYwqJxtjY8ck+5JUGBT
|
||||
ADJUNI+b/ceYVEOpLOCU5ggWiP4D7YwycqQj8ECra/UCBAlozNbUkjrP5buafRan
|
||||
d9JJXIrxfXxKbsrouiqUVaGxnYPG2duzIeMnntXyNQ4QI+TBOk4t7C5gObsUWCyz
|
||||
6JZTSAoFLEDBNoia5t9K1JkqmyqQe9tVC3Ae5C059zz6jA1N6ply005YqDd/oV7g
|
||||
tGkkuXP+nyMB0P5dmywKQhfay3Pm5EhQQTDhGQ/4qJmhOyaJRxlLe7xITm0sYJye
|
||||
mR9AvpNGP0S3RvXJpq3SD+kh8vFGF+DtFLB8TwIDAQABAoIBAEchmh8eZUKhPwyS
|
||||
r2VDeBSz5ZD35u8xQB0CTo4sY4M7EPT5wyDE9aiFenyx8PSsQIHznqTrFljYNXcm
|
||||
/47472RTsm+cGSqcwvFE3JOk2GnhT4L3T4Yi6428aD/WHoiMTd0k/uLHEwyRCJ5h
|
||||
Mzu74a/cpiI29ioWvK23LrVvFTFvOro3UgJvyK4HUS/bg5gnjqMnh87eIrLhbpNI
|
||||
0zuoRKcAVIeQWOQ2CvfRAMmijrlp+VLovIqjn/xspvFwCYPMA2ocfjxOC/n6F7/4
|
||||
jc8+/Q46xYO+3+1svU2cH0ptyxibk24Iqr+yTtMAx7gs4t7hIOYtRMAw4qCMxHyW
|
||||
/hpc3OECgYEA0hd2q2gnadEOIAcKlWrAV772yXyqHKybezbvZHW/U8jOULLZGnpS
|
||||
sddCxHE6x8nxmf3YnO3QTYYLTeY9XdlR9Z5Xydu0HzZeGJIxd3wSGZ2hTz4WgbVn
|
||||
86JpikQBISW2/6T3MKFDsxhbLmivBrVdjVV1/TRM+UG5YL3bb0/wyz8CgYEA5IqK
|
||||
AoJ+zaMGkt6HD4GM7XxSuhICbCITrV5Obkxh17tLguYuAME/WdGbIISrcS/B41KR
|
||||
YkJWHMuvGxjW3GF/chcith4k8VDb51Pov1TqvelVDywSOIROUfht6NhtPYajaIGj
|
||||
GAC5oYOoQpfH7m5ubmbYh1ueb+POfO3hKtIzWvECgYEAkUTwJW2Lczu+zJ6RzudV
|
||||
wFanRoMRDWq8x+IgfhJ9DW4YWuyP+iMC8z2pSTQSNPuKN7SzBy/ZjQFW57KAVFhk
|
||||
t7WZdlaYocxyHANaeQgta9D3LVf9MAtDqc9vss97CHSPqQ1kbxfTPA9nXRu9iqH1
|
||||
4jhpsX9sih3MFPyysrFQCvkCgYEAgTjUUBb5G8zSKrkoJNxbkux42jzUoc+i0KRC
|
||||
NJt7tz9vstPzrvmVmHOsAvcA+T7HooFNMwHPLvj8SZYB5xo5tYjfV5ozyT6vGF2Z
|
||||
fJXHJRqJvcptgwdMQYz2mHHHUsKOIskqLqg6TdjjisPHiElop4P/aomjTCDC4GCg
|
||||
sFWqNAECgYEAzOQT86+Rz9NdVfDo/C/IqsMK9fmwMjGXFaBnVBuKMv1VBrh4Zh3g
|
||||
E8QrdKwf/pxR/G91O2dBXmqhUKFX+cEgtEvqhdCyVdR4jQhAKE4hsDd2q4psqbB2
|
||||
BUqaBzo3GawDeKcwCqSPLu7tBKFJCEWjkQZhIVB1lZ8d30i2LSVv2NM=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
|
@ -39,6 +39,7 @@ require (
	github.com/go-openapi/jsonpointer v0.19.5 // indirect
	github.com/go-openapi/jsonreference v0.20.0 // indirect
	github.com/go-openapi/swag v0.19.14 // indirect
	github.com/go-redis/redis/v9 v9.0.0-rc.2 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/mock v1.6.0 // indirect
@ -157,6 +157,8 @@ github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5F
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-redis/redis/v9 v9.0.0-rc.2 h1:IN1eI8AvJJeWHjMW/hlFAv2sAfvTun2DVksDDJ3a6a0=
github.com/go-redis/redis/v9 v9.0.0-rc.2/go.mod h1:cgBknjwcBJa2prbnuHH/4k/Mlj4r0pWNV2HBanHujfY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@ -378,7 +380,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A=
github.com/openzipkin/zipkin-go v0.4.1/go.mod h1:qY0VqDSN1pOBN94dBc6w2GJlWLiovAyg7Qt6/I9HecM=
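The go.mod/go.sum hunks above pull in go-redis v9 alongside the existing v8 module. A minimal sketch of how code could target both majors behind one small interface; the type names and local addresses are assumptions for illustration, not the abstraction this commit actually introduces:

```go
package main

import (
	"context"
	"fmt"

	v8 "github.com/go-redis/redis/v8"
	v9 "github.com/go-redis/redis/v9"
)

// pinger is a hypothetical version-agnostic view over both client majors.
type pinger interface {
	ping(ctx context.Context) error
}

type v8Client struct{ c *v8.Client }

func (p v8Client) ping(ctx context.Context) error { return p.c.Ping(ctx).Err() }

type v9Client struct{ c *v9.Client }

func (p v9Client) ping(ctx context.Context) error { return p.c.Ping(ctx).Err() }

func main() {
	ctx := context.Background()
	// Assumed local Redis instances, e.g. a Redis 6 and a Redis 7 container.
	clients := []pinger{
		v8Client{v8.NewClient(&v8.Options{Addr: "localhost:6379"})},
		v9Client{v9.NewClient(&v9.Options{Addr: "localhost:6380"})},
	}
	for _, c := range clients {
		fmt.Println(c.ping(ctx))
	}
}
```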
@ -0,0 +1,19 @@

apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagebus
spec:
  type: pubsub.kafka
  version: v1
  metadata:
    - name: brokers
      value: localhost:19092,localhost:29092,localhost:39092
    - name: consumerGroup
      value: kafkaCertification2
    - name: authType
      value: "none"
    - name: initialOffset
      value: oldest
    - name: backOffDuration
      value: 50ms
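This new consumer2 component points the certification test at the plaintext brokers under the pubsub name messagebus. As a rough usage sketch, not code from this commit, an application next to a Dapr sidecar could publish to the test's neworder topic through it with the Dapr Go SDK:

```go
package main

import (
	"context"
	"log"

	dapr "github.com/dapr/go-sdk/client"
)

func main() {
	// Connects to the local Dapr sidecar over gRPC (default port).
	c, err := dapr.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// "messagebus" is the component name above; "neworder" is the topic the test uses.
	if err := c.PublishEvent(context.Background(), "messagebus", "neworder", []byte(`{"orderId": 1}`)); err != nil {
		log.Fatal(err)
	}
}
```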
@ -1,92 +0,0 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: pubsub.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: brokers
|
||||
value: localhost:19094,localhost:29094,localhost:39094
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: authType
|
||||
value: mtls
|
||||
- name: caCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
- name: clientCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
|
||||
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
|
||||
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
|
||||
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
|
||||
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
|
||||
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
|
||||
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
|
||||
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
|
||||
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
|
||||
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
|
||||
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
|
||||
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
|
||||
-----END CERTIFICATE-----
|
||||
- name: clientKey
|
||||
value: |
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
|
||||
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
|
||||
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
|
||||
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
|
||||
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
|
||||
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
|
||||
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
|
||||
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
|
||||
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
|
||||
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
|
||||
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
|
||||
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
|
||||
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
|
||||
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
|
||||
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
|
||||
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
|
||||
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
|
||||
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
|
||||
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
|
||||
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
|
||||
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
|
||||
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
|
||||
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
|
||||
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
|
||||
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: messagebus
|
||||
spec:
|
||||
type: pubsub.kafka
|
||||
version: v1
|
||||
metadata:
|
||||
- name: brokers
|
||||
value: localhost:19093,localhost:29093,localhost:39093
|
||||
- name: consumerGroup
|
||||
value: kafkaCertification2
|
||||
- name: authType
|
||||
value: "oidc"
|
||||
- name: initialOffset
|
||||
value: oldest
|
||||
- name: backOffDuration
|
||||
value: 50ms
|
||||
- name: oidcTokenEndpoint
|
||||
value: https://localhost:4443/oauth2/token
|
||||
- name: oidcClientID
|
||||
value: "dapr"
|
||||
- name: oidcClientSecret
|
||||
value: "dapr-test"
|
||||
- name: oidcScopes
|
||||
value: openid,kafka
|
||||
- name: caCert
|
||||
value: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
|
||||
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
|
||||
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
|
||||
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
|
||||
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
|
||||
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
|
||||
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
|
||||
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
|
||||
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
|
||||
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
|
||||
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
|
||||
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
|
||||
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
|
||||
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,191 +1,68 @@
|
|||
version: "3.7"
|
||||
services:
|
||||
zookeeper:
|
||||
image: confluentinc/cp-zookeeper:5.4.0
|
||||
hostname: zookeeper
|
||||
container_name: zookeeper
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
kafka1:
|
||||
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
|
||||
hostname: kafka-1
|
||||
container_name: kafka-1
|
||||
read_only: false
|
||||
entrypoint:
|
||||
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
|
||||
user: root
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "19094:19094"
|
||||
- "19093:19093"
|
||||
- "19092:19092"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./strimzi-ca-certs
|
||||
target: /opt/kafka/cluster-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-broker-certs
|
||||
target: /opt/kafka/broker-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-client-ca
|
||||
target: /opt/kafka/client-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-mtls-9094-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/oauth-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-kafka1-config
|
||||
target: /opt/kafka/custom-config
|
||||
read_only: true
|
||||
- type: volume
|
||||
source: kafka1-data
|
||||
target: /var/lib/kafka/data
|
||||
environment:
|
||||
KAFKA_METRICS_ENABLED: "false"
|
||||
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
|
||||
KAFKA_HEAP_OPTS: "-Xms128M"
|
||||
|
||||
kafka2:
|
||||
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
|
||||
hostname: kafka-2
|
||||
container_name: kafka-2
|
||||
read_only: false
|
||||
entrypoint:
|
||||
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
|
||||
user: root
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "29094:29094"
|
||||
- "29093:29093"
|
||||
- "29092:29092"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./strimzi-ca-certs
|
||||
target: /opt/kafka/cluster-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-broker-certs
|
||||
target: /opt/kafka/broker-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-client-ca
|
||||
target: /opt/kafka/client-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-mtls-9094-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/oauth-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-kafka2-config
|
||||
target: /opt/kafka/custom-config
|
||||
read_only: true
|
||||
- type: volume
|
||||
source: kafka2-data
|
||||
target: /var/lib/kafka/data
|
||||
environment:
|
||||
KAFKA_METRICS_ENABLED: "false"
|
||||
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
|
||||
KAFKA_HEAP_OPTS: "-Xms128M"
|
||||
|
||||
kafka3:
|
||||
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
|
||||
hostname: kafka-3
|
||||
container_name: kafka-3
|
||||
read_only: false
|
||||
entrypoint:
|
||||
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
|
||||
user: root
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "39094:39094"
|
||||
- "39093:39093"
|
||||
- "39092:39092"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./strimzi-ca-certs
|
||||
target: /opt/kafka/cluster-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-broker-certs
|
||||
target: /opt/kafka/broker-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-client-ca
|
||||
target: /opt/kafka/client-ca-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-mtls-9094-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/custom-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-listener-certs
|
||||
target: /opt/kafka/certificates/oauth-oauth-9093-certs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: ./strimzi-kafka3-config
|
||||
target: /opt/kafka/custom-config
|
||||
read_only: true
|
||||
- type: volume
|
||||
source: kafka3-data
|
||||
target: /var/lib/kafka/data
|
||||
environment:
|
||||
KAFKA_METRICS_ENABLED: "false"
|
||||
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
|
||||
KAFKA_HEAP_OPTS: "-Xms128M"
|
||||
hydra:
|
||||
image: oryd/hydra:v1.10.6-sqlite
|
||||
hostname: hydra
|
||||
container_name: hydra
|
||||
ports:
|
||||
- "4443:4443"
|
||||
- "4444:4444"
|
||||
read_only: false
|
||||
entrypoint: hydra serve all -c /config/config.yaml --sqa-opt-out
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./oauth-config
|
||||
target: /config
|
||||
read_only: true
|
||||
hydra-config:
|
||||
image: oryd/hydra:v1.10.6-sqlite
|
||||
hostname: hydra-config
|
||||
container_name: hydra-config
|
||||
depends_on:
|
||||
- hydra
|
||||
entrypoint: |
|
||||
/bin/sh -c "sleep 20;hydra clients create --skip-tls-verify -g client_credentials --id dapr -n dapr -r token -a openid,kafka --secret dapr-test; hydra clients create --skip-tls-verify -g client_credentials --id kafka -n kafka -r token -a openid --secret dapr-test"
|
||||
environment:
|
||||
HYDRA_ADMIN_URL: https://hydra:4444
|
||||
volumes:
|
||||
kafka1-data: {}
|
||||
kafka2-data: {}
|
||||
kafka3-data: {}
|
||||
version: "3.7"
|
||||
services:
|
||||
zookeeper:
|
||||
image: confluentinc/cp-zookeeper:7.3.0
|
||||
hostname: zookeeper
|
||||
container_name: zookeeper
|
||||
ports:
|
||||
- "2181:2181"
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
|
||||
kafka1:
|
||||
image: confluentinc/cp-server:7.3.0
|
||||
hostname: kafka1
|
||||
container_name: kafka1
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "19092:19092"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_NUM_PARTITIONS: 10
|
||||
|
||||
kafka2:
|
||||
image: confluentinc/cp-server:7.3.0
|
||||
hostname: kafka2
|
||||
container_name: kafka2
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "29092:29092"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 2
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_NUM_PARTITIONS: 10
|
||||
|
||||
kafka3:
|
||||
image: confluentinc/cp-server:7.3.0
|
||||
hostname: kafka3
|
||||
container_name: kafka3
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- "39092:39092"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 3
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
KAFKA_NUM_PARTITIONS: 10
|
||||
|
|
@ -15,9 +15,7 @@ package kafka_test

import (
	"context"
	"crypto/tls"
	"fmt"
	"net/http"
	"testing"
	"time"

@ -69,10 +67,7 @@ const (
	topicName = "neworder"
)

var (
	brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
	oauthClientQuery = "https://localhost:4444/clients/dapr"
)
var brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}

func TestKafka(t *testing.T) {
	// For Kafka, we should ensure messages are received in order.

@ -242,24 +237,6 @@ func TestKafka(t *testing.T) {

			return err
		})).
		Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error {
			httpClient := &http.Client{
				Transport: &http.Transport{
					TLSClientConfig: &tls.Config{
						InsecureSkipVerify: true, // test server certificate is not trusted.
					},
				},
			}

			resp, err := httpClient.Get(oauthClientQuery)
			if err != nil {
				return err
			}
			if resp.StatusCode != 200 {
				return fmt.Errorf("oauth client query for 'dapr' not successful")
			}
			return nil
		})).
		//
		// Run the application logic above.
		Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),

@ -280,7 +257,7 @@ func TestKafka(t *testing.T) {
		//
		// Run the Dapr sidecar with the Kafka component.
		Step(sidecar.Run(sidecarName2,
			embedded.WithComponentsPath("./components/mtls-consumer"),
			embedded.WithComponentsPath("./components/consumer2"),
			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),

@ -298,7 +275,7 @@ func TestKafka(t *testing.T) {
		//
		// Run the Dapr sidecar with the Kafka component.
		Step(sidecar.Run(sidecarName3,
			embedded.WithComponentsPath("./components/oauth-consumer"),
			embedded.WithComponentsPath("./components/consumer2"),
			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
@ -1,22 +0,0 @@
serve:
  admin:
    host: 0.0.0.0
    port: 4444
  public:
    host: 0.0.0.0
    port: 4443
    tls:
      cert:
        path: /config/tls/hydra.crt
      key:
        path: /config/tls/hydra.key
dsn: memory
log:
  leak_sensitive_values: true
  level: debug
urls:
  self:
    issuer: https://hydra:4443
strategies:
  access_token: opaque
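The deleted Hydra config above served the public token endpoint on port 4443 with a self-signed certificate. Purely as an illustration, not code from this commit, fetching a client-credentials token from such an endpoint in Go could look like the sketch below; the client id, secret, scopes, and URL mirror the values that appear in the removed oauth-consumer component, and the TLS skip is only because the test certificate is untrusted:

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net/http"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// The test server presents a self-signed certificate, so verification is skipped here.
	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpClient)

	cfg := clientcredentials.Config{
		ClientID:     "dapr",
		ClientSecret: "dapr-test",
		TokenURL:     "https://localhost:4443/oauth2/token",
		Scopes:       []string{"openid", "kafka"},
	}
	tok, err := cfg.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("got access token, expires at:", tok.Expiry)
}
```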
@ -1,21 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDejCCAmKgAwIBAgIUIgMF15XiDisW+e4I+clKWYvxcfMwDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjEwMDA1ODAwWhcNMjIxMjEwMDA1ODAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQDICbhBmpxFPFtoRTjdiki2ouZQbUoHE4llIQnJz3ta/+gWi/czrOmC3aHz
|
||||
x9pJ1kifBG5MlbdnH8WCQXx/vPXP5hpTmTDjAp87Fygk2KWdb/bQBrpRTIEgAuK3
|
||||
IWJ9tYhcDDxSwEF52xNnRkklxZpVRZX1SmcdndEqioaAnxWEM1x+JJcjrk6Ud4dv
|
||||
aX0G1xw8g6u0KT1I61Aja2OAAj+iPih6RK6xSRdxvELXbehClBHOpJP6sRw03Xw4
|
||||
HRJEesWqrGAFEp0qSZulKwn2MHAW80VVF/U9hogUQrBVFTKw/5oS9eu+BV2AY3Rh
|
||||
8DACB0blpEkjIachjjo2A8wuhBeNAgMBAAGjgZ0wgZowDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBRVxfGJ7a+7DBz2PM2w/U5aeJFOfjAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzAbBgNVHREEFDASggVoeWRyYYIJbG9jYWxob3N0MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQA+0zkBNBZ8okLiEl9B4nbfBvQXdkYOl9H9TdDYlWLNKb1S
|
||||
8Y4SNQ4hrfKspYVIBVvWfuwnphdLeexs4ovU6OkXeVPFPSsjihX9I+sJ3bFCLvkj
|
||||
lVXY/pJy/Z6QQlPg71LkCiH0Hv2RIvGZ1UtTu12d8BiF3oO8Nnzq4kiyfpPJ5QDR
|
||||
GsTKmXxEzgCcR+DI4g05hI2BQuq8Xjw4jZzt0IOcWhR2ZxBwfzLQp/hAQK69iPCN
|
||||
3DfD/eMr1EF8kAWec4eo3CFwHvrPpEdIMeNE7q9fuyfVPJGQZFKNHl7rF4YqYn/F
|
||||
4XGJxRCjd860JkJDLrmXazED6cLE1IvYPCLUsfK8
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAyAm4QZqcRTxbaEU43YpItqLmUG1KBxOJZSEJyc97Wv/oFov3
|
||||
M6zpgt2h88faSdZInwRuTJW3Zx/FgkF8f7z1z+YaU5kw4wKfOxcoJNilnW/20Aa6
|
||||
UUyBIALityFifbWIXAw8UsBBedsTZ0ZJJcWaVUWV9UpnHZ3RKoqGgJ8VhDNcfiSX
|
||||
I65OlHeHb2l9BtccPIOrtCk9SOtQI2tjgAI/oj4oekSusUkXcbxC123oQpQRzqST
|
||||
+rEcNN18OB0SRHrFqqxgBRKdKkmbpSsJ9jBwFvNFVRf1PYaIFEKwVRUysP+aEvXr
|
||||
vgVdgGN0YfAwAgdG5aRJIyGnIY46NgPMLoQXjQIDAQABAoIBAQDEErLmqxOt0aGP
|
||||
LPq2PEtVqYqzHszG7uFnnOCpTZQN+HSXVQ4zOrOQMIoEF8rhQQbhx0gODVo93KiO
|
||||
Kn5L/v26kEMR2kBO400McIBKzYhYL1zvPwj1k1Wl+O4crr6JlZxZDS07t3L2bEQy
|
||||
oHQmb+/80T5RtmIoZ36Ugj+gZ06BytKPY2yZRpLnF/p9V77JK2BT2pg1EXahU5LL
|
||||
wGhodg+MqFrKPk0TpdQ7edipHEiqprk/sEH9KA4cPfa83LBv6xRcHYBzlA0mHnZo
|
||||
jgGdptDAFJeJcMLwywF1CvI/x5Y0mAkDN95uFcw8/ozX2pKGuIZYY9BjR444zKm2
|
||||
8V7Br2gBAoGBAN2n2BlBXTjOgZ7c50fGFA+oR23C90r3AHwnh1FOnCzKOUNbW48F
|
||||
tsKvmI0DUK+sg+ZkGIEz1ll81FVzCAZQ8sii3LV5qnW7QVhZszHbKWtI9ulcFDqe
|
||||
ZqKlOahy5GmcGfxbniufrHaBlP+Y1gwJd8NXjoFKNxLLtQ8S25e4QwKNAoGBAOcI
|
||||
ZH+eaZ3653fFPzuJtsbbfqB5HW6bTLIUqnwNRGghvMP0JTLzYYVlcaLMrI2L50Qf
|
||||
Z5IEl7+uVeTmRehkoe5J3r5tIifKrVGnQM7inpTfkCOlY2tsAL8/XvQ/6ikBEt2J
|
||||
r166mOk3RfjuuXuBFrPwfpZ5fMggFa92e5ukWqkBAoGAQ12VsedJu9AXWP7uU8QB
|
||||
qNiODO/qVKBJR3KED9QCZyJ20N/dLdSgvP69MG5HgXy/AbB+OhZVGRF1Pxsc3z6O
|
||||
6yeESKtXgTyOGZn5ejePmQmt8TKI+1/U9a2dnnJ8tRQ6WZZGth9rPQEZFa2PsEzY
|
||||
V0gvCWBS6KV8u74Re0UHKKkCgYB9j8Ae49d+9rgKDfd5wjTGCtDdIjXuwRSDzFuD
|
||||
pCpDdeKDlRMKh9++gg2qbxZwr1J3YaIGZ9yZXoRsLQJddSPUv+0BDYr8mVhtAjtk
|
||||
tSF+w6ow1VgdL8uQJT7T/FClDGJWaNgY4cztIw8yZXwFNXlDPjduTISWt2lRvVEc
|
||||
m8xyAQKBgF+aAk2qJ8/MM4aXoWgjkWiDGvgfVmWsYMpalz34PDP+hzPg3LxaGKsn
|
||||
jm+LQs9Z/WX26hxZK0HWQbcCsJ81mBvgeXnUrY/T50Zvd7zUFF+1WG7Is9KUlLA1
|
||||
ceQzJcixurQtuUSkwj2PfVziiufkHk43tuzDQ57carUX6kg2OwAD
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
|
||||
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
|
||||
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
|
||||
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
|
||||
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
|
||||
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
|
||||
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
|
||||
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
|
||||
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
|
||||
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
|
||||
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
|
||||
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
|
||||
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
|
||||
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
|
||||
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
|
||||
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
|
||||
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
|
||||
-----END CERTIFICATE-----
|
||||