Merge branch 'master' into optimize-bulkpubres-struct

Mukundan Sundararajan 2022-12-17 01:51:53 +05:30 committed by GitHub
commit f69306a123
126 changed files with 1915 additions and 3299 deletions

View File

@ -21,7 +21,6 @@ import (
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"time"
@ -36,6 +35,7 @@ import (
contribmeta "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/state/query"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
)
@ -77,7 +77,6 @@ type CosmosItem struct {
const (
metadataPartitionKey = "partitionKey"
metadataTTLKey = "ttlInSeconds"
defaultTimeout = 20 * time.Second
statusNotFound = "NotFound"
)
@ -481,7 +480,7 @@ func createUpsertItem(contentType string, req state.SetRequest, partitionKey str
isBinary = false
}
ttl, err := parseTTL(req.Metadata)
ttl, err := stateutils.ParseTTL(req.Metadata)
if err != nil {
return CosmosItem{}, fmt.Errorf("error parsing TTL from metadata: %s", err)
}
@ -534,20 +533,6 @@ func populatePartitionMetadata(key string, requestMetadata map[string]string) st
return key
}
func parseTTL(requestMetadata map[string]string) (*int, error) {
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, err
}
i := int(parsedVal)
return &i, nil
}
return nil, nil
}
func isNotFoundError(err error) bool {
if err == nil {
return false
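
Note: the per-component parseTTL helpers removed in this commit (here, and in the Cassandra, OCI Object Storage, and Oracle files below) are replaced by the shared stateutils.ParseTTL, added under state/utils/ttl.go further down. A minimal, self-contained sketch of the consolidated call pattern, assuming the components-contrib module is on the import path:

package main

import (
	"fmt"

	stateutils "github.com/dapr/components-contrib/state/utils"
)

func main() {
	// stateutils.MetadataTTLKey is "ttlInSeconds".
	ttl, err := stateutils.ParseTTL(map[string]string{
		stateutils.MetadataTTLKey: "120",
	})
	if err != nil {
		fmt.Println("error parsing TTL:", err)
		return
	}
	if ttl != nil {
		fmt.Println("TTL in seconds:", *ttl) // 120
	}
}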

View File

@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/state"
stateutils "github.com/dapr/components-contrib/state/utils"
)
type widget struct {
@ -284,7 +285,7 @@ func TestCreateCosmosItemWithTTL(t *testing.T) {
Key: "testKey",
Value: value,
Metadata: map[string]string{
metadataTTLKey: strconv.Itoa(ttl),
stateutils.MetadataTTLKey: strconv.Itoa(ttl),
},
}
@ -316,7 +317,7 @@ func TestCreateCosmosItemWithTTL(t *testing.T) {
Key: "testKey",
Value: value,
Metadata: map[string]string{
metadataTTLKey: strconv.Itoa(ttl),
stateutils.MetadataTTLKey: strconv.Itoa(ttl),
},
}
@ -347,7 +348,7 @@ func TestCreateCosmosItemWithTTL(t *testing.T) {
Key: "testKey",
Value: value,
Metadata: map[string]string{
metadataTTLKey: "notattl",
stateutils.MetadataTTLKey: "notattl",
},
}

View File

@ -24,6 +24,7 @@ import (
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
)
@ -279,7 +280,7 @@ func (c *Cassandra) Set(ctx context.Context, req *state.SetRequest) error {
session = sess
}
ttl, err := parseTTL(req.Metadata)
ttl, err := stateutils.ParseTTL(req.Metadata)
if err != nil {
return fmt.Errorf("error parsing TTL from Metadata: %s", err)
}
@ -302,20 +303,6 @@ func (c *Cassandra) createSession(consistency gocql.Consistency) (*gocql.Session
return session, nil
}
func parseTTL(requestMetadata map[string]string) (*int, error) {
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, err
}
parsedInt := int(parsedVal)
return &parsedInt, nil
}
return nil, nil
}
func (c *Cassandra) GetComponentMetadata() map[string]string {
metadataStruct := cassandraMetadata{}
metadataInfo := map[string]string{}

View File

@ -14,7 +14,6 @@ limitations under the License.
package cassandra
import (
"strconv"
"strings"
"testing"
@ -111,35 +110,3 @@ func TestGetCassandraMetadata(t *testing.T) {
assert.NotNil(t, err)
})
}
func TestParseTTL(t *testing.T) {
t.Run("TTL Not an integer", func(t *testing.T) {
ttlInSeconds := "not an integer"
ttl, err := parseTTL(map[string]string{
"ttlInSeconds": ttlInSeconds,
})
assert.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL specified with wrong key", func(t *testing.T) {
ttlInSeconds := 12345
ttl, err := parseTTL(map[string]string{
"expirationTime": strconv.Itoa(ttlInSeconds),
})
assert.NoError(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL is a number", func(t *testing.T) {
ttlInSeconds := 12345
ttl, err := parseTTL(map[string]string{
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
})
assert.NoError(t, err)
assert.Equal(t, *ttl, ttlInSeconds)
})
t.Run("TTL not set", func(t *testing.T) {
ttl, err := parseTTL(map[string]string{})
assert.NoError(t, err)
assert.Nil(t, ttl)
})
}

View File

@ -93,7 +93,7 @@ func TestParseTTL(t *testing.T) {
},
})
assert.NotNil(t, err, "tll is not an integer")
assert.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL is a negative integer ends up translated to 0", func(t *testing.T) {

View File

@ -22,7 +22,6 @@ import (
"os"
"path"
"reflect"
"strconv"
"strings"
"time"
@ -34,6 +33,7 @@ import (
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
)
@ -278,17 +278,13 @@ func (r *StateStore) writeDocument(ctx context.Context, req *state.SetRequest) e
}
func (r *StateStore) convertTTLtoExpiryTime(req *state.SetRequest, metadata map[string]string) error {
ttl, ttlerr := parseTTL(req.Metadata)
ttl, ttlerr := stateutils.ParseTTL(req.Metadata)
if ttlerr != nil {
return fmt.Errorf("error in parsing TTL %w", ttlerr)
return fmt.Errorf("error parsing TTL: %w", ttlerr)
}
if ttl != nil {
if *ttl == -1 {
r.logger.Debugf("TTL is set to -1; this means: never expire. ")
} else {
metadata[expiryTimeMetaLabel] = time.Now().UTC().Add(time.Second * time.Duration(*ttl)).Format(isoDateTimeFormat)
r.logger.Debugf("Set %s in meta properties for object to ", expiryTimeMetaLabel, metadata[expiryTimeMetaLabel])
}
metadata[expiryTimeMetaLabel] = time.Now().UTC().Add(time.Second * time.Duration(*ttl)).Format(isoDateTimeFormat)
r.logger.Debugf("Set %s in meta properties for object to ", expiryTimeMetaLabel, metadata[expiryTimeMetaLabel])
}
return nil
}
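
For reference, a standalone sketch of the expiry computation above. The component's isoDateTimeFormat constant is not visible in this diff, so time.RFC3339 stands in for it here as an assumption:

package main

import (
	"fmt"
	"time"
)

func main() {
	ttl := 60 // seconds, as returned by stateutils.ParseTTL
	// isoDateTimeFormat is defined elsewhere in the component;
	// RFC 3339 is only a stand-in for this sketch.
	expiry := time.Now().UTC().Add(time.Duration(ttl) * time.Second).Format(time.RFC3339)
	fmt.Println("expiry-time metadata value:", expiry)
}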
@ -367,20 +363,6 @@ func getFileName(key string) string {
return path.Join(pr[0], pr[1])
}
func parseTTL(requestMetadata map[string]string) (*int, error) {
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, fmt.Errorf("error in parsing ttl metadata : %w", err)
}
parsedInt := int(parsedVal)
return &parsedInt, nil
}
return nil, nil
}
/**************** functions with OCI ObjectStorage Service interaction. */
func getNamespace(ctx context.Context, client objectstorage.ObjectStorageClient) (string, error) {

View File

@ -17,7 +17,6 @@ import (
"context"
"fmt"
"io"
"strconv"
"testing"
"time"
@ -394,36 +393,3 @@ func TestGetFilename(t *testing.T) {
assert.Equal(t, "app-id-key", filename)
})
}
func TestParseTTL(t *testing.T) {
t.Parallel()
t.Run("TTL Not an integer", func(t *testing.T) {
ttlInSeconds := "not an integer"
ttl, err := parseTTL(map[string]string{
"ttlInSeconds": ttlInSeconds,
})
assert.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL specified with wrong key", func(t *testing.T) {
ttlInSeconds := 12345
ttl, err := parseTTL(map[string]string{
"expirationTime": strconv.Itoa(ttlInSeconds),
})
assert.NoError(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL is a number", func(t *testing.T) {
ttlInSeconds := 12345
ttl, err := parseTTL(map[string]string{
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
})
assert.NoError(t, err)
assert.Equal(t, *ttl, ttlInSeconds)
})
t.Run("TTL not set", func(t *testing.T) {
ttl, err := parseTTL(map[string]string{})
assert.NoError(t, err)
assert.Nil(t, ttl)
})
}

View File

@ -21,7 +21,6 @@ import (
"fmt"
"net/url"
"os"
"strconv"
"testing"
"time"
@ -658,43 +657,6 @@ func setItemWithNoKey(t *testing.T, ods *OracleDatabase) {
assert.NotNil(t, err)
}
func TestParseTTL(t *testing.T) {
t.Parallel()
t.Run("TTL Not an integer", func(t *testing.T) {
t.Parallel()
ttlInSeconds := "not an integer"
ttl, err := parseTTL(map[string]string{
"ttlInSeconds": ttlInSeconds,
})
assert.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL specified with wrong key", func(t *testing.T) {
t.Parallel()
ttlInSeconds := 12345
ttl, err := parseTTL(map[string]string{
"expirationTime": strconv.Itoa(ttlInSeconds),
})
assert.NoError(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL is a number", func(t *testing.T) {
t.Parallel()
ttlInSeconds := 12345
ttl, err := parseTTL(map[string]string{
"ttlInSeconds": strconv.Itoa(ttlInSeconds),
})
assert.NoError(t, err)
assert.Equal(t, *ttl, ttlInSeconds)
})
t.Run("TTL not set", func(t *testing.T) {
t.Parallel()
ttl, err := parseTTL(map[string]string{})
assert.NoError(t, err)
assert.Nil(t, ttl)
})
}
func testSetItemWithInvalidTTL(t *testing.T, ods *OracleDatabase) {
setReq := &state.SetRequest{
Key: randomKey(),

View File

@ -20,13 +20,12 @@ import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/google/uuid"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/state/utils"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
// Blank import for the underlying Oracle Database driver.
@ -36,7 +35,6 @@ import (
const (
connectionStringKey = "connectionString"
oracleWalletLocationKey = "oracleWalletLocation"
metadataTTLKey = "ttlInSeconds"
errMissingConnectionString = "missing connection string"
tableName = "state"
)
@ -115,20 +113,6 @@ func (o *oracleDatabaseAccess) Init(metadata state.Metadata) error {
return nil
}
func parseTTL(requestMetadata map[string]string) (*int, error) {
if val, found := requestMetadata[metadataTTLKey]; found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, fmt.Errorf("error in parsing ttl metadata : %w", err)
}
parsedInt := int(parsedVal)
return &parsedInt, nil
}
return nil, nil
}
// Set makes an insert or update to the database.
func (o *oracleDatabaseAccess) Set(ctx context.Context, req *state.SetRequest) error {
o.logger.Debug("Setting state value in OracleDatabase")
@ -149,19 +133,12 @@ func (o *oracleDatabaseAccess) Set(ctx context.Context, req *state.SetRequest) e
return fmt.Errorf("when FirstWrite is to be enforced, a value must be provided for the ETag")
}
var ttlSeconds int
ttl, ttlerr := parseTTL(req.Metadata)
ttl, ttlerr := stateutils.ParseTTL(req.Metadata)
if ttlerr != nil {
return fmt.Errorf("error in parsing TTL %w", ttlerr)
return fmt.Errorf("error parsing TTL: %w", ttlerr)
}
if ttl != nil {
if *ttl == -1 {
o.logger.Debugf("TTL is set to -1; this means: never expire. ")
} else {
if *ttl < -1 {
return fmt.Errorf("incorrect value for %s %d", metadataTTLKey, *ttl)
}
ttlSeconds = *ttl
}
ttlSeconds = *ttl
}
requestValue := req.Value
byteArray, isBinary := req.Value.([]uint8)
@ -172,7 +149,7 @@ func (o *oracleDatabaseAccess) Set(ctx context.Context, req *state.SetRequest) e
}
// Convert to json string.
bt, _ := utils.Marshal(requestValue, json.Marshal)
bt, _ := stateutils.Marshal(requestValue, json.Marshal)
value := string(bt)
var result sql.Result

View File

@ -15,6 +15,7 @@ package postgresql
import (
"context"
"database/sql"
"github.com/dapr/components-contrib/state"
)
@ -31,3 +32,12 @@ type dbAccess interface {
Query(ctx context.Context, req *state.QueryRequest) (*state.QueryResponse, error)
Close() error // io.Closer
}
// Interface that contains methods for querying.
// Applies to both *sql.DB and *sql.Tx
type dbquerier interface {
Exec(query string, args ...any) (sql.Result, error)
ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
QueryRow(query string, args ...any) *sql.Row
QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}
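
Both *sql.DB and *sql.Tx satisfy dbquerier, which is what lets the same Set/Delete code paths run either standalone against the pool or inside a transaction. A minimal sketch of the pattern (the upsert helper and table are hypothetical):

package main

import (
	"context"
	"database/sql"
)

// querier mirrors the dbquerier interface above; *sql.DB and *sql.Tx both satisfy it.
type querier interface {
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
}

// upsert is a hypothetical helper: callers pass either the pool or an open
// transaction, and the statement runs in whichever context they chose.
func upsert(ctx context.Context, db querier, key, value string) error {
	_, err := db.ExecContext(ctx,
		`INSERT INTO state (key, value) VALUES ($1, $2)
		 ON CONFLICT (key) DO UPDATE SET value = $2`, key, value)
	return err
}

func bulkUpsert(ctx context.Context, pool *sql.DB, kv map[string]string) error {
	tx, err := pool.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit has succeeded
	for k, v := range kv {
		if err := upsert(ctx, tx, k, v); err != nil { // pass tx, not pool
			return err
		}
	}
	return tx.Commit()
}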

View File

@ -0,0 +1,227 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package postgresql
import (
"context"
"database/sql"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/dapr/kit/logger"
)
// Performs migrations for the database schema
type migrations struct {
Logger logger.Logger
Conn *sql.DB
StateTableName string
MetadataTableName string
}
// Perform the required migrations
func (m *migrations) Perform(ctx context.Context) error {
// Use an advisory lock (with an arbitrary number) to ensure that no one else is performing migrations at the same time
// This is the only way to also ensure we are not running multiple "CREATE TABLE IF NOT EXISTS" at the exact same time
// See: https://www.postgresql.org/message-id/CA+TgmoZAdYVtwBfp1FL2sMZbiHCWT4UPrzRLNnX1Nb30Ku3-gg@mail.gmail.com
const lockID = 42
// Long timeout here as this query may block
queryCtx, cancel := context.WithTimeout(ctx, time.Minute)
_, err := m.Conn.ExecContext(queryCtx, "SELECT pg_advisory_lock($1)", lockID)
cancel()
if err != nil {
return fmt.Errorf("faild to acquire advisory lock: %w", err)
}
// Release the lock
defer func() {
queryCtx, cancel = context.WithTimeout(ctx, time.Minute)
_, err = m.Conn.ExecContext(queryCtx, "SELECT pg_advisory_unlock($1)", lockID)
cancel()
if err != nil {
// Panicking here, as this forcibly closes the session and thus ensures we are not leaving locks hanging around
m.Logger.Fatalf("Failed to release advisory lock: %v", err)
}
}()
// Check if the metadata table exists, which we also use to store the migration level
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
exists, _, _, err := m.tableExists(queryCtx, m.MetadataTableName)
cancel()
if err != nil {
return err
}
// If the table doesn't exist, create it
if !exists {
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
err = m.createMetadataTable(queryCtx)
cancel()
if err != nil {
return err
}
}
// Select the migration level
var (
migrationLevelStr string
migrationLevel int
)
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
err = m.Conn.
QueryRowContext(queryCtx,
fmt.Sprintf(`SELECT value FROM %s WHERE key = 'migrations'`, m.MetadataTableName),
).Scan(&migrationLevelStr)
cancel()
if errors.Is(err, sql.ErrNoRows) {
// If there's no row...
migrationLevel = 0
} else if err != nil {
return fmt.Errorf("failed to read migration level: %w", err)
} else {
migrationLevel, err = strconv.Atoi(migrationLevelStr)
if err != nil || migrationLevel < 0 {
return fmt.Errorf("invalid migration level found in metadata table: %s", migrationLevelStr)
}
}
// Perform the migrations
for i := migrationLevel; i < len(allMigrations); i++ {
m.Logger.Infof("Performing migration %d", i)
err = allMigrations[i](ctx, m)
if err != nil {
return fmt.Errorf("failed to perform migration %d: %w", i, err)
}
queryCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
_, err = m.Conn.ExecContext(queryCtx,
fmt.Sprintf(`INSERT INTO %s (key, value) VALUES ('migrations', $1) ON CONFLICT (key) DO UPDATE SET value = $1`, m.MetadataTableName),
strconv.Itoa(i+1),
)
cancel()
if err != nil {
return fmt.Errorf("failed to update migration level in metadata table: %w", err)
}
}
return nil
}
func (m migrations) createMetadataTable(ctx context.Context) error {
m.Logger.Infof("Creating metadata table '%s'", m.MetadataTableName)
// Add an "IF NOT EXISTS" in case another Dapr sidecar is creating the same table at the same time
// In the next step we'll acquire a lock so there won't be issues with concurrency
_, err := m.Conn.Exec(fmt.Sprintf(
`CREATE TABLE IF NOT EXISTS %s (
key text NOT NULL PRIMARY KEY,
value text NOT NULL
)`,
m.MetadataTableName,
))
if err != nil {
return fmt.Errorf("failed to create metadata table: %w", err)
}
return nil
}
// If the table exists, returns true and the name of the table and schema
func (m migrations) tableExists(ctx context.Context, tableName string) (exists bool, schema string, table string, err error) {
table, schema, err = m.tableSchemaName(tableName)
if err != nil {
return false, "", "", err
}
if schema == "" {
err = m.Conn.
QueryRowContext(
ctx,
`SELECT table_name, table_schema
FROM information_schema.tables
WHERE table_name = $1`,
table,
).
Scan(&table, &schema)
} else {
err = m.Conn.
QueryRowContext(
ctx,
`SELECT table_name, table_schema
FROM information_schema.tables
WHERE table_schema = $1 AND table_name = $2`,
schema, table,
).
Scan(&table, &schema)
}
if err != nil && errors.Is(err, sql.ErrNoRows) {
return false, "", "", nil
} else if err != nil {
return false, "", "", fmt.Errorf("failed to check if table '%s' exists: %w", tableName, err)
}
return true, schema, table, nil
}
// If the table name includes a schema (e.g. `schema.table`), returns the two parts separately
func (m migrations) tableSchemaName(tableName string) (table string, schema string, err error) {
parts := strings.Split(tableName, ".")
switch len(parts) {
case 1:
return parts[0], "", nil
case 2:
return parts[1], parts[0], nil
default:
return "", "", errors.New("invalid table name: must be in the format 'table' or 'schema.table'")
}
}
var allMigrations = [2]func(ctx context.Context, m *migrations) error{
// Migration 0: create the state table
func(ctx context.Context, m *migrations) error {
// We need to add an "IF NOT EXISTS" because we may be migrating from when we did not use a metadata table
m.Logger.Infof("Creating state table '%s'", m.StateTableName)
_, err := m.Conn.Exec(
fmt.Sprintf(
`CREATE TABLE IF NOT EXISTS %s (
key text NOT NULL PRIMARY KEY,
value jsonb NOT NULL,
isbinary boolean NOT NULL,
insertdate TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updatedate TIMESTAMP WITH TIME ZONE NULL
)`,
m.StateTableName,
),
)
if err != nil {
return fmt.Errorf("failed to create state table: %w", err)
}
return nil
},
// Migration 1: add the "expiredate" column
func(ctx context.Context, m *migrations) error {
m.Logger.Infof("Adding expiredate column to state table '%s'", m.StateTableName)
_, err := m.Conn.Exec(fmt.Sprintf(
`ALTER TABLE %s ADD expiredate TIMESTAMP WITH TIME ZONE`,
m.StateTableName,
))
if err != nil {
return fmt.Errorf("failed to update state table: %w", err)
}
return nil
},
}
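
Migrations run in order and the completed level is persisted under the 'migrations' key in the metadata table, so extending the schema means appending an entry here (and growing the array size to [3]). A hypothetical migration 2, not part of this commit, that indexes expiredate so the periodic cleanup DELETE can avoid a full table scan:

// Hypothetical migration 2: index expiredate for the background cleanup.
// Adding it requires changing allMigrations to [3]func(...) error.
func(ctx context.Context, m *migrations) error {
	m.Logger.Infof("Creating expiredate index on state table '%s'", m.StateTableName)
	_, err := m.Conn.Exec(fmt.Sprintf(
		`CREATE INDEX IF NOT EXISTS state_expiredate_idx ON %s (expiredate)`,
		m.StateTableName,
	))
	if err != nil {
		return fmt.Errorf("failed to create expiredate index: %w", err)
	}
	return nil
},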

View File

@ -26,34 +26,38 @@ import (
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/state/query"
"github.com/dapr/components-contrib/state/utils"
stateutils "github.com/dapr/components-contrib/state/utils"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
// Blank import for the underlying PostgreSQL driver.
// Blank import for the underlying Postgres driver.
_ "github.com/jackc/pgx/v5/stdlib"
)
const (
connectionStringKey = "connectionString"
errMissingConnectionString = "missing connection string"
defaultTableName = "state"
defaultTableName = "state"
defaultMetadataTableName = "dapr_metadata"
cleanupIntervalKey = "cleanupIntervalInSeconds"
defaultCleanupInternal = 3600 // In seconds = 1 hour
)
// postgresDBAccess implements dbaccess.
type postgresDBAccess struct {
logger logger.Logger
metadata postgresMetadataStruct
db *sql.DB
connectionString string
tableName string
var errMissingConnectionString = errors.New("missing connection string")
// PostgresDBAccess implements dbaccess.
type PostgresDBAccess struct {
logger logger.Logger
metadata postgresMetadataStruct
cleanupInterval *time.Duration
db *sql.DB
ctx context.Context
cancel context.CancelFunc
}
// newPostgresDBAccess creates a new instance of postgresAccess.
func newPostgresDBAccess(logger logger.Logger) *postgresDBAccess {
logger.Debug("Instantiating new PostgreSQL state store")
func newPostgresDBAccess(logger logger.Logger) *PostgresDBAccess {
logger.Debug("Instantiating new Postgres state store")
return &postgresDBAccess{
return &PostgresDBAccess{
logger: logger,
}
}
@ -61,14 +65,66 @@ func newPostgresDBAccess(logger logger.Logger) *postgresDBAccess {
type postgresMetadataStruct struct {
ConnectionString string
ConnectionMaxIdleTime time.Duration
TableName string
TableName string // Could be in the format "schema.table" or just "table"
MetadataTableName string // Could be in the format "schema.table" or just "table"
}
// Init sets up PostgreSQL connection and ensures that the state table exists.
func (p *postgresDBAccess) Init(meta state.Metadata) error {
p.logger.Debug("Initializing PostgreSQL state store")
// Init sets up Postgres connection and ensures that the state table exists.
func (p *PostgresDBAccess) Init(meta state.Metadata) error {
p.logger.Debug("Initializing Postgres state store")
p.ctx, p.cancel = context.WithCancel(context.Background())
err := p.ParseMetadata(meta)
if err != nil {
p.logger.Errorf("Failed to parse metadata: %v", err)
return err
}
db, err := sql.Open("pgx", p.metadata.ConnectionString)
if err != nil {
p.logger.Error(err)
return err
}
p.db = db
pingCtx, pingCancel := context.WithTimeout(p.ctx, 30*time.Second)
pingErr := db.PingContext(pingCtx)
pingCancel()
if pingErr != nil {
return pingErr
}
p.db.SetConnMaxIdleTime(p.metadata.ConnectionMaxIdleTime)
if err != nil {
return err
}
migrate := &migrations{
Logger: p.logger,
Conn: p.db,
MetadataTableName: p.metadata.MetadataTableName,
StateTableName: p.metadata.TableName,
}
err = migrate.Perform(p.ctx)
if err != nil {
return err
}
p.ScheduleCleanupExpiredData(p.ctx)
return nil
}
func (p *PostgresDBAccess) GetDB() *sql.DB {
return p.db
}
func (p *PostgresDBAccess) ParseMetadata(meta state.Metadata) error {
m := postgresMetadataStruct{
TableName: defaultTableName,
TableName: defaultTableName,
MetadataTableName: defaultMetadataTableName,
}
err := metadata.DecodeMetadata(meta.Properties, &m)
if err != nil {
@ -77,44 +133,33 @@ func (p *postgresDBAccess) Init(meta state.Metadata) error {
p.metadata = m
if m.ConnectionString == "" {
p.logger.Error("Missing postgreSQL connection string")
return errors.New(errMissingConnectionString)
}
p.connectionString = m.ConnectionString
db, err := sql.Open("pgx", p.connectionString)
if err != nil {
p.logger.Error(err)
return err
return errMissingConnectionString
}
p.db = db
s, ok := meta.Properties[cleanupIntervalKey]
if ok && s != "" {
cleanupIntervalInSec, err := strconv.ParseInt(s, 10, 0)
if err != nil {
return fmt.Errorf("invalid value for '%s': %s", cleanupIntervalKey, s)
}
pingErr := db.Ping()
if pingErr != nil {
return pingErr
// A non-positive value from metadata disables the automatic cleanup.
if cleanupIntervalInSec > 0 {
p.cleanupInterval = ptr.Of(time.Duration(cleanupIntervalInSec) * time.Second)
}
} else {
p.cleanupInterval = ptr.Of(defaultCleanupInternal * time.Second)
}
p.db.SetConnMaxIdleTime(m.ConnectionMaxIdleTime)
if err != nil {
return err
}
err = p.ensureStateTable(m.TableName)
if err != nil {
return err
}
p.tableName = m.TableName
return nil
}
// Set makes an insert or update to the database.
func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error {
p.logger.Debug("Setting state value in PostgreSQL")
func (p *PostgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error {
return p.doSet(ctx, p.db, req)
}
func (p *PostgresDBAccess) doSet(parentCtx context.Context, db dbquerier, req *state.SetRequest) error {
err := state.CheckRequestOptions(req.Options)
if err != nil {
return err
@ -135,22 +180,47 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
}
// Convert to json string
bt, _ := utils.Marshal(v, json.Marshal)
bt, _ := stateutils.Marshal(v, json.Marshal)
value := string(bt)
// TTL
var ttlSeconds int
ttl, ttlerr := stateutils.ParseTTL(req.Metadata)
if ttlerr != nil {
return fmt.Errorf("error parsing TTL: %w", ttlerr)
}
if ttl != nil {
ttlSeconds = *ttl
}
var result sql.Result
// Sprintf is required for table name because sql.DB does not substitute parameters for table names.
// Other parameters use sql.DB parameter substitution.
if req.Options.Concurrency == state.FirstWrite && (req.ETag == nil || *req.ETag == "") {
result, err = p.db.ExecContext(ctx, fmt.Sprintf(
`INSERT INTO %s (key, value, isbinary) VALUES ($1, $2, $3);`,
p.tableName), req.Key, value, isBinary)
} else if req.ETag == nil || *req.ETag == "" {
result, err = p.db.ExecContext(ctx, fmt.Sprintf(
`INSERT INTO %s (key, value, isbinary) VALUES ($1, $2, $3)
ON CONFLICT (key) DO UPDATE SET value = $2, isbinary = $3, updatedate = NOW();`,
p.tableName), req.Key, value, isBinary)
// Sprintf is required for the table name because sql.DB does not substitute parameters for table names.
// Other parameters use sql.DB parameter substitution.
var (
query string
queryExpiredate string
params []any
)
if req.ETag == nil || *req.ETag == "" {
if req.Options.Concurrency == state.FirstWrite {
query = `INSERT INTO %[1]s
(key, value, isbinary, expiredate)
VALUES
($1, $2, $3, %[2]s)`
} else {
query = `INSERT INTO %[1]s
(key, value, isbinary, expiredate)
VALUES
($1, $2, $3, %[2]s)
ON CONFLICT (key)
DO UPDATE SET
value = $2,
isbinary = $3,
updatedate = CURRENT_TIMESTAMP,
expiredate = %[2]s`
}
params = []any{req.Key, value, isBinary}
} else {
// Convert req.ETag to uint32 for postgres XID compatibility
var etag64 uint64
@ -158,20 +228,30 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
if err != nil {
return state.NewETagError(state.ETagInvalid, err)
}
etag := uint32(etag64)
// When an etag is provided do an update - no insert
result, err = p.db.ExecContext(ctx, fmt.Sprintf(
`UPDATE %s SET value = $1, isbinary = $2, updatedate = NOW()
WHERE key = $3 AND xmin = $4;`,
p.tableName), value, isBinary, req.Key, etag)
query = `UPDATE %[1]s
SET
value = $1,
isbinary = $2,
updatedate = CURRENT_TIMESTAMP,
expiredate = %[2]s
WHERE
key = $3
AND xmin = $4`
params = []any{value, isBinary, req.Key, uint32(etag64)}
}
if ttlSeconds > 0 {
queryExpiredate = "CURRENT_TIMESTAMP + interval '" + strconv.Itoa(ttlSeconds) + " seconds'"
} else {
queryExpiredate = "NULL"
}
result, err = db.ExecContext(parentCtx, fmt.Sprintf(query, p.metadata.TableName, queryExpiredate), params...)
if err != nil {
if req.ETag != nil && *req.ETag != "" {
return state.NewETagError(state.ETagMismatch, err)
}
return err
}
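
As a sketch, for a set with no ETag, non-FirstWrite concurrency, and ttlSeconds = 120 on the default table, the Sprintf composition above produces:

// INSERT INTO state
//     (key, value, isbinary, expiredate)
// VALUES
//     ($1, $2, $3, CURRENT_TIMESTAMP + interval '120 seconds')
// ON CONFLICT (key)
// DO UPDATE SET
//     value = $2,
//     isbinary = $3,
//     updatedate = CURRENT_TIMESTAMP,
//     expiredate = CURRENT_TIMESTAMP + interval '120 seconds'
//
// With no TTL, %[2]s becomes NULL, which also clears any expiry previously
// set on the row when the upsert takes the UPDATE branch.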
@ -179,7 +259,6 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
if err != nil {
return err
}
if rows != 1 {
return errors.New("no item was updated")
}
@ -187,33 +266,32 @@ func (p *postgresDBAccess) Set(ctx context.Context, req *state.SetRequest) error
return nil
}
func (p *postgresDBAccess) BulkSet(ctx context.Context, req []state.SetRequest) error {
p.logger.Debug("Executing BulkSet request")
tx, err := p.db.Begin()
func (p *PostgresDBAccess) BulkSet(parentCtx context.Context, req []state.SetRequest) error {
tx, err := p.db.BeginTx(parentCtx, nil)
if err != nil {
return err
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
if len(req) > 0 {
for _, s := range req {
sa := s // Fix for gosec G601: Implicit memory aliasing in for loop.
err = p.Set(ctx, &sa)
for i := range req {
err = p.doSet(parentCtx, tx, &req[i])
if err != nil {
tx.Rollback()
return err
}
}
}
err = tx.Commit()
if err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return err
return nil
}
// Get returns data from the database. If data does not exist for the key an empty state.GetResponse will be returned.
func (p *postgresDBAccess) Get(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
p.logger.Debug("Getting state value from PostgreSQL")
func (p *PostgresDBAccess) Get(parentCtx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
if req.Key == "" {
return nil, errors.New("missing key in get operation")
}
@ -223,7 +301,15 @@ func (p *postgresDBAccess) Get(ctx context.Context, req *state.GetRequest) (*sta
isBinary bool
etag uint64 // Postgres uses uint32, but FormatUint requires uint64, so using uint64 directly to avoid re-allocations
)
err := p.db.QueryRowContext(ctx, fmt.Sprintf("SELECT value, isbinary, xmin as etag FROM %s WHERE key = $1", p.tableName), req.Key).Scan(&value, &isBinary, &etag)
query := `SELECT
value, isbinary, xmin AS etag
FROM %s
WHERE
key = $1
AND (expiredate IS NULL OR expiredate >= CURRENT_TIMESTAMP)`
err := p.db.
QueryRowContext(parentCtx, fmt.Sprintf(query, p.metadata.TableName), req.Key).
Scan(&value, &isBinary, &etag)
if err != nil {
// If no rows exist, return an empty response, otherwise return the error.
if err == sql.ErrNoRows {
@ -261,8 +347,11 @@ func (p *postgresDBAccess) Get(ctx context.Context, req *state.GetRequest) (*sta
}
// Delete removes an item from the state store.
func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest) (err error) {
p.logger.Debug("Deleting state value from PostgreSQL")
func (p *PostgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest) (err error) {
return p.doDelete(ctx, p.db, req)
}
func (p *PostgresDBAccess) doDelete(parentCtx context.Context, db dbquerier, req *state.DeleteRequest) (err error) {
if req.Key == "" {
return errors.New("missing key in delete operation")
}
@ -270,7 +359,7 @@ func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest)
var result sql.Result
if req.ETag == nil || *req.ETag == "" {
result, err = p.db.ExecContext(ctx, "DELETE FROM state WHERE key = $1", req.Key)
result, err = db.ExecContext(parentCtx, "DELETE FROM state WHERE key = $1", req.Key)
} else {
// Convert req.ETag to uint32 for postgres XID compatibility
var etag64 uint64
@ -280,7 +369,7 @@ func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest)
}
etag := uint32(etag64)
result, err = p.db.ExecContext(ctx, "DELETE FROM state WHERE key = $1 and xmin = $2", req.Key, etag)
result, err = db.ExecContext(parentCtx, "DELETE FROM state WHERE key = $1 AND xmin = $2", req.Key, etag)
}
if err != nil {
@ -299,92 +388,88 @@ func (p *postgresDBAccess) Delete(ctx context.Context, req *state.DeleteRequest)
return nil
}
func (p *postgresDBAccess) BulkDelete(ctx context.Context, req []state.DeleteRequest) error {
p.logger.Debug("Executing BulkDelete request")
tx, err := p.db.Begin()
func (p *PostgresDBAccess) BulkDelete(parentCtx context.Context, req []state.DeleteRequest) error {
tx, err := p.db.BeginTx(parentCtx, nil)
if err != nil {
return err
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
if len(req) > 0 {
for i := range req {
err = p.Delete(ctx, &req[i])
err = p.doDelete(parentCtx, tx, &req[i])
if err != nil {
tx.Rollback()
return err
}
}
}
err = tx.Commit()
if err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return err
return nil
}
func (p *postgresDBAccess) ExecuteMulti(ctx context.Context, request *state.TransactionalStateRequest) error {
p.logger.Debug("Executing PostgreSQL transaction")
tx, err := p.db.Begin()
func (p *PostgresDBAccess) ExecuteMulti(parentCtx context.Context, request *state.TransactionalStateRequest) error {
tx, err := p.db.BeginTx(parentCtx, nil)
if err != nil {
return err
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
for _, o := range request.Operations {
switch o.Operation {
case state.Upsert:
var setReq state.SetRequest
setReq, err = getSet(o)
if err != nil {
tx.Rollback()
return err
}
err = p.Set(ctx, &setReq)
err = p.doSet(parentCtx, tx, &setReq)
if err != nil {
tx.Rollback()
return err
}
case state.Delete:
var delReq state.DeleteRequest
delReq, err = getDelete(o)
if err != nil {
tx.Rollback()
return err
}
err = p.Delete(ctx, &delReq)
err = p.doDelete(parentCtx, tx, &delReq)
if err != nil {
tx.Rollback()
return err
}
default:
tx.Rollback()
return fmt.Errorf("unsupported operation: %s", o.Operation)
}
}
err = tx.Commit()
if err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return err
return nil
}
// Query executes a query against store.
func (p *postgresDBAccess) Query(ctx context.Context, req *state.QueryRequest) (*state.QueryResponse, error) {
p.logger.Debug("Getting query value from PostgreSQL")
func (p *PostgresDBAccess) Query(parentCtx context.Context, req *state.QueryRequest) (*state.QueryResponse, error) {
q := &Query{
query: "",
params: []interface{}{},
tableName: p.tableName,
params: []any{},
tableName: p.metadata.TableName,
}
qbuilder := query.NewQueryBuilder(q)
if err := qbuilder.BuildQuery(&req.Query); err != nil {
return &state.QueryResponse{}, err
}
data, token, err := q.execute(ctx, p.logger, p.db)
data, token, err := q.execute(parentCtx, p.logger, p.db)
if err != nil {
return &state.QueryResponse{}, err
}
@ -395,8 +480,94 @@ func (p *postgresDBAccess) Query(ctx context.Context, req *state.QueryRequest) (
}, nil
}
func (p *PostgresDBAccess) ScheduleCleanupExpiredData(ctx context.Context) {
if p.cleanupInterval == nil {
return
}
p.logger.Infof("Schedule expired data clean up every %d seconds", int(p.cleanupInterval.Seconds()))
go func() {
ticker := time.NewTicker(*p.cleanupInterval)
for {
select {
case <-ticker.C:
err := p.CleanupExpired(ctx)
if err != nil {
p.logger.Errorf("Error removing expired data: %v", err)
}
case <-ctx.Done():
p.logger.Debug("Stopped background cleanup of expired data")
return
}
}
}()
}
func (p *PostgresDBAccess) CleanupExpired(ctx context.Context) error {
// Check if the last iteration was too recent
// This performs an atomic operation, so it also allows coordination with other daprd processes
canContinue, err := p.UpdateLastCleanup(ctx, p.db, *p.cleanupInterval)
if err != nil {
// Log errors only
p.logger.Warnf("Failed to read last cleanup time from database: %v", err)
}
if !canContinue {
p.logger.Debug("Last cleanup was performed too recently")
return nil
}
// Note we're not using the transaction here as we don't want this to be rolled back half-way or to lock the table unnecessarily
// Need to use fmt.Sprintf because we can't parametrize a table name
// Note we are not setting a timeout here as this query can take a "long" time, especially if there's no index on expiredate
//nolint:gosec
stmt := fmt.Sprintf(`DELETE FROM %s WHERE expiredate IS NOT NULL AND expiredate < CURRENT_TIMESTAMP`, p.metadata.TableName)
res, err := p.db.ExecContext(ctx, stmt)
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
}
cleaned, err := res.RowsAffected()
if err != nil {
return fmt.Errorf("failed to count affected rows: %w", err)
}
p.logger.Infof("Removed %d expired rows", cleaned)
return nil
}
// UpdateLastCleanup sets the 'last-cleanup' value, but only if more than cleanupInterval has passed since the previous cleanup.
// Returns true if the row was updated, which means that the cleanup can proceed.
func (p *PostgresDBAccess) UpdateLastCleanup(ctx context.Context, db dbquerier, cleanupInterval time.Duration) (bool, error) {
queryCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
res, err := db.ExecContext(queryCtx,
fmt.Sprintf(`INSERT INTO %[1]s (key, value)
VALUES ('last-cleanup', CURRENT_TIMESTAMP)
ON CONFLICT (key)
DO UPDATE SET value = CURRENT_TIMESTAMP
WHERE (EXTRACT('epoch' FROM CURRENT_TIMESTAMP - %[1]s.value::timestamp with time zone) * 1000)::bigint > $1`,
p.metadata.MetadataTableName),
cleanupInterval.Milliseconds()-100, // Subtract 100ms for some buffer
)
cancel()
if err != nil {
return true, fmt.Errorf("failed to execute query: %w", err)
}
n, err := res.RowsAffected()
if err != nil {
return true, fmt.Errorf("failed to count affected rows: %w", err)
}
return n > 0, nil
}
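
Because the guard is a single conditional upsert, any number of sidecars can race on the 'last-cleanup' row and at most one wins per interval. A minimal sketch of the call from inside the package, assuming an initialized *PostgresDBAccess p and an example interval of one hour:

canContinue, err := p.UpdateLastCleanup(ctx, p.GetDB(), time.Hour)
if err != nil {
	// Treated as non-fatal in CleanupExpired above: on error the function
	// returns true, so the cleanup still proceeds.
	p.logger.Warnf("failed to read last cleanup time: %v", err)
}
if canContinue {
	// This process won the race; run the DELETE of expired rows.
}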
// Close implements io.Closer.
func (p *postgresDBAccess) Close() error {
func (p *PostgresDBAccess) Close() error {
if p.cancel != nil {
p.cancel()
p.cancel = nil
}
if p.db != nil {
return p.db.Close()
}
@ -404,34 +575,10 @@ func (p *postgresDBAccess) Close() error {
return nil
}
func (p *postgresDBAccess) ensureStateTable(stateTableName string) error {
exists, err := tableExists(p.db, stateTableName)
if err != nil {
return err
}
if !exists {
p.logger.Info("Creating PostgreSQL state table")
createTable := fmt.Sprintf(`CREATE TABLE %s (
key text NOT NULL PRIMARY KEY,
value jsonb NOT NULL,
isbinary boolean NOT NULL,
insertdate TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updatedate TIMESTAMP WITH TIME ZONE NULL);`, stateTableName)
_, err = p.db.Exec(createTable)
if err != nil {
return err
}
}
return nil
}
func tableExists(db *sql.DB, tableName string) (bool, error) {
exists := false
err := db.QueryRow("SELECT EXISTS (SELECT FROM pg_tables where tablename = $1)", tableName).Scan(&exists)
return exists, err
// GetCleanupInterval returns the cleanupInterval property.
// This is primarily used for tests.
func (p *PostgresDBAccess) GetCleanupInterval() *time.Duration {
return p.cleanupInterval
}
// Returns the set requests.

View File

@ -18,18 +18,23 @@ import (
"context"
"database/sql"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
"github.com/dapr/kit/logger"
// Blank import for pgx
_ "github.com/jackc/pgx/v5/stdlib"
)
type mocks struct {
db *sql.DB
mock sqlmock.Sqlmock
pgDba *postgresDBAccess
pgDba *PostgresDBAccess
}
func TestGetSetWithWrongType(t *testing.T) {
@ -451,7 +456,7 @@ func mockDatabase(t *testing.T) (*mocks, error) {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
dba := &postgresDBAccess{
dba := &PostgresDBAccess{
logger: logger,
db: db,
}
@ -462,3 +467,95 @@ func mockDatabase(t *testing.T) (*mocks, error) {
pgDba: dba,
}, err
}
func TestParseMetadata(t *testing.T) {
t.Run("missing connection string", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.Error(t, err)
assert.ErrorIs(t, err, errMissingConnectionString)
})
t.Run("has connection string", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.NoError(t, err)
})
t.Run("default table name", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.NoError(t, err)
assert.Equal(t, p.metadata.TableName, defaultTableName)
})
t.Run("custom table name", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
"tableName": "mytable",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.NoError(t, err)
assert.Equal(t, p.metadata.TableName, "mytable")
})
t.Run("default cleanupIntervalInSeconds", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.NoError(t, err)
_ = assert.NotNil(t, p.cleanupInterval) &&
assert.Equal(t, *p.cleanupInterval, defaultCleanupInternal*time.Second)
})
t.Run("invalid cleanupIntervalInSeconds", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
"cleanupIntervalInSeconds": "NaN",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.Error(t, err)
})
t.Run("positive cleanupIntervalInSeconds", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
"cleanupIntervalInSeconds": "42",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.NoError(t, err)
_ = assert.NotNil(t, p.cleanupInterval) &&
assert.Equal(t, *p.cleanupInterval, 42*time.Second)
})
t.Run("zero cleanupIntervalInSeconds", func(t *testing.T) {
p := &PostgresDBAccess{}
props := map[string]string{
"connectionString": "foo",
"cleanupIntervalInSeconds": "0",
}
err := p.ParseMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
assert.NoError(t, err)
assert.Nil(t, p.cleanupInterval)
})
}

View File

@ -24,7 +24,6 @@ import (
// PostgreSQL state store.
type PostgreSQL struct {
features []state.Feature
logger logger.Logger
dbaccess dbAccess
}
@ -40,7 +39,6 @@ func NewPostgreSQLStateStore(logger logger.Logger) state.Store {
// This unexported constructor allows injecting a dbAccess instance for unit testing.
func newPostgreSQLStateStore(logger logger.Logger, dba dbAccess) *PostgreSQL {
return &PostgreSQL{
features: []state.Feature{state.FeatureETag, state.FeatureTransactional, state.FeatureQueryAPI},
logger: logger,
dbaccess: dba,
}
@ -53,7 +51,7 @@ func (p *PostgreSQL) Init(metadata state.Metadata) error {
// Features returns the features available in this state store.
func (p *PostgreSQL) Features() []state.Feature {
return p.features
return []state.Feature{state.FeatureETag, state.FeatureTransactional, state.FeatureQueryAPI}
}
// Delete removes an entity from the store.
@ -102,10 +100,15 @@ func (p *PostgreSQL) Close() error {
if p.dbaccess != nil {
return p.dbaccess.Close()
}
return nil
}
// Returns the dbaccess property.
// This method is used in tests.
func (p *PostgreSQL) GetDBAccess() dbAccess {
return p.dbaccess
}
func (p *PostgreSQL) GetComponentMetadata() map[string]string {
metadataStruct := postgresMetadataStruct{}
metadataInfo := map[string]string{}

View File

@ -49,7 +49,7 @@ func TestPostgreSQLIntegration(t *testing.T) {
})
metadata := state.Metadata{
Base: metadata.Base{Properties: map[string]string{connectionStringKey: connectionString}},
Base: metadata.Base{Properties: map[string]string{"connectionString": connectionString}},
}
pgs := NewPostgreSQLStateStore(logger.NewLogger("test")).(*PostgreSQL)
@ -62,11 +62,6 @@ func TestPostgreSQLIntegration(t *testing.T) {
t.Fatal(error)
}
t.Run("Create table succeeds", func(t *testing.T) {
t.Parallel()
testCreateTable(t, pgs.dbaccess.(*postgresDBAccess))
})
t.Run("Get Set Delete one item", func(t *testing.T) {
t.Parallel()
setGetUpdateDeleteOneItem(t, pgs)
@ -161,33 +156,6 @@ func setGetUpdateDeleteOneItem(t *testing.T, pgs *PostgreSQL) {
deleteItem(t, pgs, key, getResponse.ETag)
}
// testCreateTable tests the ability to create the state table.
func testCreateTable(t *testing.T, dba *postgresDBAccess) {
tableName := "test_state"
// Drop the table if it already exists
exists, err := tableExists(dba.db, tableName)
assert.Nil(t, err)
if exists {
dropTable(t, dba.db, tableName)
}
// Create the state table and test for its existence
err = dba.ensureStateTable(tableName)
assert.Nil(t, err)
exists, err = tableExists(dba.db, tableName)
assert.Nil(t, err)
assert.True(t, exists)
// Drop the state table
dropTable(t, dba.db, tableName)
}
func dropTable(t *testing.T, db *sql.DB, tableName string) {
_, err := db.Exec(fmt.Sprintf("DROP TABLE %s", tableName))
assert.Nil(t, err)
}
func deleteItemThatDoesNotExist(t *testing.T, pgs *PostgreSQL) {
// Delete the item with a key not in the store
deleteReq := &state.DeleteRequest{
@ -477,7 +445,7 @@ func testInitConfiguration(t *testing.T) {
tests := []struct {
name string
props map[string]string
expectedErr string
expectedErr error
}{
{
name: "Empty",
@ -486,8 +454,8 @@ func testInitConfiguration(t *testing.T) {
},
{
name: "Valid connection string",
props: map[string]string{connectionStringKey: getConnectionString()},
expectedErr: "",
props: map[string]string{"connectionString": getConnectionString()},
expectedErr: nil,
},
}
@ -501,11 +469,11 @@ func testInitConfiguration(t *testing.T) {
}
err := p.Init(metadata)
if tt.expectedErr == "" {
assert.Nil(t, err)
if tt.expectedErr == nil {
assert.NoError(t, err)
} else {
assert.NotNil(t, err)
assert.Equal(t, err.Error(), tt.expectedErr)
assert.Error(t, err)
assert.ErrorIs(t, err, tt.expectedErr)
}
})
}

View File

@ -107,7 +107,7 @@ func createPostgreSQL(t *testing.T) *PostgreSQL {
assert.NotNil(t, pgs)
metadata := &state.Metadata{
Base: metadata.Base{Properties: map[string]string{connectionStringKey: fakeConnectionString}},
Base: metadata.Base{Properties: map[string]string{"connectionString": fakeConnectionString}},
}
err := pgs.Init(*metadata)

state/utils/ttl.go Normal file
View File

@ -0,0 +1,40 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"math"
"strconv"
)
// Key used for "ttlInSeconds" in metadata.
const MetadataTTLKey = "ttlInSeconds"
// ParseTTL parses the "ttlInSeconds" metadata property.
func ParseTTL(requestMetadata map[string]string) (*int, error) {
val, found := requestMetadata[MetadataTTLKey]
if found && val != "" {
parsedVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, fmt.Errorf("incorrect value for metadata '%s': %w", MetadataTTLKey, err)
}
if parsedVal < -1 || parsedVal > math.MaxInt32 {
return nil, fmt.Errorf("incorrect value for metadata '%s': must be -1 or greater", MetadataTTLKey)
}
i := int(parsedVal)
return &i, nil
}
return nil, nil
}
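
Callers get a three-way result: nil when no TTL was requested, -1 as the "never expire" sentinel (the smallest value the validation admits), and otherwise a count of seconds (the Postgres store above treats anything not greater than zero as no expiry). A small sketch of the interpretation, assuming the usual stateutils import alias:

ttl, err := stateutils.ParseTTL(req.Metadata)
if err != nil {
	return err
}
switch {
case ttl == nil:
	// No TTL requested.
case *ttl == -1:
	// Sentinel: the record should never expire.
default:
	// Expire after *ttl seconds (stores may treat 0 as no expiry).
}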

state/utils/ttl_test.go Normal file
View File

@ -0,0 +1,74 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"math"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestParseTTL(t *testing.T) {
t.Run("TTL Not an integer", func(t *testing.T) {
ttlInSeconds := "not an integer"
ttl, err := ParseTTL(map[string]string{
MetadataTTLKey: ttlInSeconds,
})
require.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL specified with wrong key", func(t *testing.T) {
ttlInSeconds := 12345
ttl, err := ParseTTL(map[string]string{
"expirationTime": strconv.Itoa(ttlInSeconds),
})
require.NoError(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL is a number", func(t *testing.T) {
ttlInSeconds := 12345
ttl, err := ParseTTL(map[string]string{
MetadataTTLKey: strconv.Itoa(ttlInSeconds),
})
require.NoError(t, err)
assert.Equal(t, *ttl, ttlInSeconds)
})
t.Run("TTL not set", func(t *testing.T) {
ttl, err := ParseTTL(map[string]string{})
require.NoError(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL < -1", func(t *testing.T) {
ttl, err := ParseTTL(map[string]string{
MetadataTTLKey: "-3",
})
require.Error(t, err)
assert.Nil(t, ttl)
})
t.Run("TTL bigger than 32-bit", func(t *testing.T) {
ttl, err := ParseTTL(map[string]string{
MetadataTTLKey: strconv.FormatInt(math.MaxInt32+1, 10),
})
require.Error(t, err)
assert.Nil(t, ttl)
})
}

View File

@ -1,98 +0,0 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Outpub binding topic
value: neworder
- name: consumeRetryEnabled # enable consumer retry
value: true
- name: brokers
value: localhost:19094,localhost:29094,localhost:39094
- name: consumerGroup
value: kafkaCertification2
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: authType
value: mtls
- name: caCert
value: |
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----
- name: clientCert
value: |
-----BEGIN CERTIFICATE-----
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
-----END CERTIFICATE-----
- name: clientKey
value: |
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
-----END RSA PRIVATE KEY-----

View File

@ -1,53 +0,0 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Outpub binding topic
value: neworder
- name: consumeRetryEnabled # enable consumer retry
value: true
- name: brokers
value: localhost:19093,localhost:29093,localhost:39093
- name: consumerGroup
value: kafkaCertification2
- name: authType
value: "oidc"
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: oidcTokenEndpoint
value: https://localhost:4443/oauth2/token
- name: oidcClientID
value: "dapr"
- name: oidcClientSecret
value: "dapr-test"
- name: oidcScopes
value: openid,kafka
- name: caCert
value: |
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----

View File

@ -1,24 +1,26 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Outpub binding topic
value: neworder
- name: brokers
value: localhost:19092,localhost:29092,localhost:39092
- name: consumerGroup
value: kafkaCertification1
- name: authType
value: "none"
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: backOffDuration
value: 50ms
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Output binding topic
value: neworder
- name: consumeRetryEnabled # enable consumer retry
value: true
- name: brokers
value: localhost:19092,localhost:29092,localhost:39092
- name: consumerGroup
value: kafkaCertification1
- name: authType
value: "none"
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: backOffDuration
value: 50ms

View File

@ -1,26 +1,26 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Outpub binding topic
value: neworder
- name: consumeRetryEnabled # enable consumer retry
value: true
- name: brokers
value: localhost:19092,localhost:29092,localhost:39092
- name: consumerGroup
value: kafkaCertification1
- name: authType
value: "none"
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: backOffDuration
value: 50ms
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Output binding topic
value: neworder
- name: consumeRetryEnabled # enable consumer retry
value: true
- name: brokers
value: localhost:19092,localhost:29092,localhost:39092
- name: consumerGroup
value: kafkaCertification2
- name: authType
value: "none"
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: backOffDuration
value: 50ms

View File

@ -1,96 +0,0 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Outpub binding topic
value: neworder
- name: brokers
value: localhost:19094,localhost:29094,localhost:39094
- name: consumerGroup
value: kafkaCertification2
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: authType
value: mtls
- name: caCert
value: |
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----
- name: clientCert
value: |
-----BEGIN CERTIFICATE-----
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
-----END CERTIFICATE-----
- name: clientKey
value: |
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
-----END RSA PRIVATE KEY-----

View File

@ -1,51 +0,0 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: bindings.kafka
version: v1
metadata:
- name: topics # Input binding topic
value: neworder
- name: publishTopic # Output binding topic
value: neworder
- name: brokers
value: localhost:19093,localhost:29093,localhost:39093
- name: consumerGroup
value: kafkaCertification2
- name: authType
value: "oidc"
- name: initialOffset
value: oldest
- name: backOffDuration
value: 50ms
- name: oidcTokenEndpoint
value: https://localhost:4443/oauth2/token
- name: oidcClientID
value: "dapr"
- name: oidcClientSecret
value: "dapr-test"
- name: oidcScopes
value: openid,kafka
- name: caCert
value: |
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----

View File

@ -1,191 +1,68 @@
version: "3.7"
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.4.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka1:
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
hostname: kafka-1
container_name: kafka-1
read_only: false
entrypoint:
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
user: root
depends_on:
- zookeeper
ports:
- "19094:19094"
- "19093:19093"
- "19092:19092"
volumes:
- type: bind
source: ./strimzi-ca-certs
target: /opt/kafka/cluster-ca-certs
read_only: true
- type: bind
source: ./strimzi-broker-certs
target: /opt/kafka/broker-certs
read_only: true
- type: bind
source: ./strimzi-client-ca
target: /opt/kafka/client-ca-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-mtls-9094-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/oauth-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-kafka1-config
target: /opt/kafka/custom-config
read_only: true
- type: volume
source: kafka1-data
target: /var/lib/kafka/data
environment:
KAFKA_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms128M"
kafka2:
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
hostname: kafka-2
container_name: kafka-2
read_only: false
entrypoint:
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
user: root
depends_on:
- zookeeper
ports:
- "29094:29094"
- "29093:29093"
- "29092:29092"
volumes:
- type: bind
source: ./strimzi-ca-certs
target: /opt/kafka/cluster-ca-certs
read_only: true
- type: bind
source: ./strimzi-broker-certs
target: /opt/kafka/broker-certs
read_only: true
- type: bind
source: ./strimzi-client-ca
target: /opt/kafka/client-ca-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-mtls-9094-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/oauth-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-kafka2-config
target: /opt/kafka/custom-config
read_only: true
- type: volume
source: kafka2-data
target: /var/lib/kafka/data
environment:
KAFKA_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms128M"
kafka3:
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
hostname: kafka-3
container_name: kafka-3
read_only: false
entrypoint:
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
user: root
depends_on:
- zookeeper
ports:
- "39094:39094"
- "39093:39093"
- "39092:39092"
volumes:
- type: bind
source: ./strimzi-ca-certs
target: /opt/kafka/cluster-ca-certs
read_only: true
- type: bind
source: ./strimzi-broker-certs
target: /opt/kafka/broker-certs
read_only: true
- type: bind
source: ./strimzi-client-ca
target: /opt/kafka/client-ca-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-mtls-9094-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/oauth-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-kafka3-config
target: /opt/kafka/custom-config
read_only: true
- type: volume
source: kafka3-data
target: /var/lib/kafka/data
environment:
KAFKA_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms128M"
hydra:
image: oryd/hydra:v1.10.6-sqlite
hostname: hydra
container_name: hydra
ports:
- "4443:4443"
- "4444:4444"
read_only: false
entrypoint: hydra serve all -c /config/config.yaml --sqa-opt-out
volumes:
- type: bind
source: ./oauth-config
target: /config
read_only: true
hydra-config:
image: oryd/hydra:v1.10.6-sqlite
hostname: hydra-config
container_name: hydra-config
depends_on:
- hydra
entrypoint: |
/bin/sh -c "sleep 20;hydra clients create --skip-tls-verify -g client_credentials --id dapr -n dapr -r token -a openid,kafka --secret dapr-test; hydra clients create --skip-tls-verify -g client_credentials --id kafka -n kafka -r token -a openid --secret dapr-test"
environment:
HYDRA_ADMIN_URL: https://hydra:4444
volumes:
kafka1-data: {}
kafka2-data: {}
kafka3-data: {}
version: "3.7"
services:
zookeeper:
image: confluentinc/cp-zookeeper:7.3.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka1:
image: confluentinc/cp-server:7.3.0
hostname: kafka1
container_name: kafka1
depends_on:
- zookeeper
ports:
- "19092:19092"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_NUM_PARTITIONS: 10
kafka2:
image: confluentinc/cp-server:7.3.0
hostname: kafka2
container_name: kafka2
depends_on:
- zookeeper
ports:
- "29092:29092"
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_NUM_PARTITIONS: 10
kafka3:
image: confluentinc/cp-server:7.3.0
hostname: kafka3
container_name: kafka3
depends_on:
- zookeeper
ports:
- "39092:39092"
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_NUM_PARTITIONS: 10

View File

@ -1,339 +0,0 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kafka_test
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cenkalti/backoff/v4"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"go.uber.org/multierr"
// Pub/Sub.
"github.com/dapr/components-contrib/bindings"
// Dapr runtime and Go-SDK
"github.com/dapr/dapr/pkg/runtime"
dapr "github.com/dapr/go-sdk/client"
"github.com/dapr/go-sdk/service/common"
kit_retry "github.com/dapr/kit/retry"
// Certification testing runnables
"github.com/dapr/components-contrib/tests/certification/embedded"
"github.com/dapr/components-contrib/tests/certification/flow"
"github.com/dapr/components-contrib/tests/certification/flow/app"
"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
"github.com/dapr/components-contrib/tests/certification/flow/network"
"github.com/dapr/components-contrib/tests/certification/flow/retry"
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
"github.com/dapr/components-contrib/tests/certification/flow/simulate"
"github.com/dapr/components-contrib/tests/certification/flow/watcher"
)
func TestKafka_with_retry(t *testing.T) {
// For Kafka, we should ensure messages are received in order.
consumerGroup1 := watcher.NewOrdered()
// This watcher is across multiple consumers in the same group
// so exact ordering is not expected.
consumerGroup2 := watcher.NewUnordered()
// Application logic that tracks messages from a topic.
application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
return func(ctx flow.Context, s common.Service) error {
// Simulate periodic errors.
sim := simulate.PeriodicError(ctx, 100)
// Set up the /orders event handler.
return multierr.Combine(
s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
if err := sim(); err != nil {
return nil, err
}
// Track/Observe the data of the event.
watcher.Observe(string(in.Data))
ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
return in.Data, nil
}),
)
}
}
// Set the partition key on all messages so they
// are written to the same partition.
// This allows for checking of ordered messages.
metadata := map[string]string{
messageKey: "test",
}
// Test logic that sends messages to a topic and
// verifies the application has received them.
sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
_, hasKey := metadata[messageKey]
return func(ctx flow.Context) error {
client := sidecar.GetClient(ctx, sidecarName1)
// Declare what is expected BEFORE performing any steps
// that will satisfy the test.
msgs := make([]string, numMessages)
for i := range msgs {
msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
}
for _, m := range watchers {
m.ExpectStrings(msgs...)
}
// If no key is provided, create a random one.
// For Kafka, this will spread messages across
// the topic's partitions.
if !hasKey {
metadata[messageKey] = uuid.NewString()
}
// Send events that the application above will observe.
ctx.Log("Sending messages!")
for _, msg := range msgs {
ctx.Logf("Sending: %q", msg)
err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
Name: bindingName,
Operation: string(bindings.CreateOperation),
Data: []byte(msg),
Metadata: metadata,
})
require.NoError(ctx, err, "error output binding message")
}
// Do the messages we observed match what we expect?
for _, m := range watchers {
m.Assert(ctx, time.Minute)
}
return nil
}
}
// sendMessagesInBackground and assertMessages are
// Runnables for testing publishing and consuming
// messages reliably when infrastructure and network
// interruptions occur.
var task flow.AsyncTask
sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
return func(ctx flow.Context) error {
client := sidecar.GetClient(ctx, sidecarName1)
for _, m := range watchers {
m.Reset()
}
t := time.NewTicker(100 * time.Millisecond)
defer t.Stop()
counter := 1
for {
select {
case <-task.Done():
return nil
case <-t.C:
msg := fmt.Sprintf("Background message - %03d", counter)
for _, m := range watchers {
m.Prepare(msg) // Track for observation
}
// Publish with retries.
bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
if err := kit_retry.NotifyRecover(func() error {
return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
Name: bindingName,
Operation: string(bindings.CreateOperation),
Data: []byte(msg),
Metadata: metadata,
})
}, bo, func(err error, t time.Duration) {
ctx.Logf("Error outpub binding message, retrying in %s", t)
}, func() {}); err == nil {
for _, m := range watchers {
m.Add(msg) // Success
}
counter++
} else {
for _, m := range watchers {
m.Remove(msg) // Remove from Tracking
}
}
}
}
}
}
assertMessages := func(messages ...*watcher.Watcher) flow.Runnable {
return func(ctx flow.Context) error {
// Signal sendMessagesInBackground to stop and wait for it to complete.
task.CancelAndWait()
for _, m := range messages {
m.Assert(ctx, 5*time.Minute)
}
return nil
}
}
flow.New(t, "kafka certification with retry").
// Run Kafka using Docker Compose.
Step(dockercompose.Run(clusterName, dockerComposeYAML)).
Step("wait for broker sockets",
network.WaitForAddresses(5*time.Minute, brokers...)).
Step("wait", flow.Sleep(5*time.Second)).
Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
config := sarama.NewConfig()
config.ClientID = "test-consumer"
config.Consumer.Return.Errors = true
// Create new consumer
client, err := sarama.NewConsumer(brokers, config)
if err != nil {
return err
}
defer client.Close()
// Ensure the brokers are ready by attempting to consume
// a topic partition.
_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
return err
})).
Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error {
httpClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true, // test server certificate is not trusted.
},
},
}
resp, err := httpClient.Get(oauthClientQuery)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return fmt.Errorf("oauth client query for 'dapr' not successful")
}
return nil
})).
// Run the application logic above.
Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
application(appID1, consumerGroup1))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName1,
embedded.WithComponentsPath("./components-retry/consumer1"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
componentRuntimeOptions(),
)).
//
// Run the second application.
Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
application(appID2, consumerGroup2))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName2,
embedded.WithComponentsPath("./components-retry/mtls-consumer"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
componentRuntimeOptions(),
)).
//
// Send messages using the same metadata/message key so we can expect
// in-order processing.
Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup2)).
// Run the third application.
Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
application(appID3, consumerGroup2))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName3,
embedded.WithComponentsPath("./components-retry/oauth-consumer"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
componentRuntimeOptions(),
)).
Step("reset", flow.Reset(consumerGroup2)).
//
// Send messages with random keys to test message consumption
// across more than one consumer group and consumers per group.
Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
// Gradually stop each broker.
// This tests the component's ability to handle reconnections
// when brokers are shut down cleanly.
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
Step("wait", flow.Sleep(5*time.Second)).
Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
Step("wait", flow.Sleep(5*time.Second)).
//
// Errors will likely start occurring here since quorum is lost.
Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
Step("wait", flow.Sleep(10*time.Second)).
//
// Errors will definitely occur here.
Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
Step("wait", flow.Sleep(30*time.Second)).
Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
//
// Component should recover at this point.
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
//
// Simulate a network interruption.
// This tests the component's ability to handle reconnections
// when Dapr is disconnected abnormally.
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
Step("wait", flow.Sleep(5*time.Second)).
//
// Errors will start occurring here.
Step("interrupt network",
network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
//
// Component should recover at this point.
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
// Reset and test that all messages are received during a
// consumer rebalance.
Step("reset", flow.Reset(consumerGroup2)).
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup2)).
Step("wait", flow.Sleep(15*time.Second)).
Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
Step("wait", flow.Sleep(3*time.Second)).
Step("stop app 2", app.Stop(appID2)).
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
Run()
}

View File

@ -1,374 +1,360 @@
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kafka_test
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cenkalti/backoff/v4"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"go.uber.org/multierr"
// Pub/Sub.
"github.com/dapr/components-contrib/bindings"
bindings_kafka "github.com/dapr/components-contrib/bindings/kafka"
bindings_loader "github.com/dapr/dapr/pkg/components/bindings"
// Dapr runtime and Go-SDK
"github.com/dapr/dapr/pkg/runtime"
dapr "github.com/dapr/go-sdk/client"
"github.com/dapr/go-sdk/service/common"
"github.com/dapr/kit/logger"
kit_retry "github.com/dapr/kit/retry"
// Certification testing runnables
"github.com/dapr/components-contrib/tests/certification/embedded"
"github.com/dapr/components-contrib/tests/certification/flow"
"github.com/dapr/components-contrib/tests/certification/flow/app"
"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
"github.com/dapr/components-contrib/tests/certification/flow/network"
"github.com/dapr/components-contrib/tests/certification/flow/retry"
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
"github.com/dapr/components-contrib/tests/certification/flow/watcher"
)
const (
sidecarName1 = "dapr-1"
sidecarName2 = "dapr-2"
sidecarName3 = "dapr-3"
appID1 = "app-1"
appID2 = "app-2"
appID3 = "app-3"
clusterName = "kafkacertification"
dockerComposeYAML = "docker-compose.yml"
numMessages = 1000
appPort = 8000
portOffset = 2
messageKey = "partitionKey"
bindingName = "messagebus"
topicName = "neworder"
)
var (
brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
oauthClientQuery = "https://localhost:4444/clients/dapr"
)
func TestKafka(t *testing.T) {
// For Kafka, we should ensure messages are received in order.
consumerGroup1 := watcher.NewOrdered()
// This watcher is across multiple consumers in the same group
// so exact ordering is not expected.
consumerGroup2 := watcher.NewUnordered()
// Application logic that tracks messages from a topic.
application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
return func(ctx flow.Context, s common.Service) error {
// Set up the /orders event handler.
return multierr.Combine(
s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
// Track/Observe the data of the event.
watcher.Observe(string(in.Data))
ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
return in.Data, nil
}),
)
}
}
// Set the partition key on all messages so they
// are written to the same partition.
// This allows for checking of ordered messages.
metadata := map[string]string{
messageKey: "test",
}
// Test logic that sends messages to a topic and
// verifies the application has received them.
sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
_, hasKey := metadata[messageKey]
return func(ctx flow.Context) error {
client := sidecar.GetClient(ctx, sidecarName1)
// Declare what is expected BEFORE performing any steps
// that will satisfy the test.
msgs := make([]string, numMessages)
for i := range msgs {
msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
}
for _, m := range watchers {
m.ExpectStrings(msgs...)
}
// If no key is provided, create a random one.
// For Kafka, this will spread messages across
// the topic's partitions.
if !hasKey {
metadata[messageKey] = uuid.NewString()
}
// Send events that the application above will observe.
ctx.Log("Sending messages!")
for _, msg := range msgs {
err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
Name: bindingName,
Operation: string(bindings.CreateOperation),
Data: []byte(msg),
Metadata: metadata,
})
require.NoError(ctx, err, "error publishing message")
}
// Do the messages we observed match what we expect?
for _, m := range watchers {
m.Assert(ctx, time.Minute)
}
return nil
}
}
// sendMessagesInBackground and assertMessages are
// Runnables for testing publishing and consuming
// messages reliably when infrastructure and network
// interruptions occur.
var task flow.AsyncTask
sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
return func(ctx flow.Context) error {
client := sidecar.GetClient(ctx, sidecarName1)
for _, m := range watchers {
m.Reset()
}
t := time.NewTicker(100 * time.Millisecond)
defer t.Stop()
counter := 1
for {
select {
case <-task.Done():
return nil
case <-t.C:
msg := fmt.Sprintf("Background message - %03d", counter)
for _, m := range watchers {
m.Prepare(msg) // Track for observation
}
// Publish with retries.
bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
if err := kit_retry.NotifyRecover(func() error {
return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
Name: bindingName,
Operation: string(bindings.CreateOperation),
Data: []byte(msg),
Metadata: metadata,
})
}, bo, func(err error, t time.Duration) {
ctx.Logf("Error outpub binding message, retrying in %s", t)
}, func() {}); err == nil {
for _, m := range watchers {
m.Add(msg) // Success
}
counter++
} else {
for _, m := range watchers {
m.Remove(msg) // Remove from Tracking
}
}
}
}
}
}
assertMessages := func(watchers ...*watcher.Watcher) flow.Runnable {
return func(ctx flow.Context) error {
// Signal sendMessagesInBackground to stop and wait for it to complete.
task.CancelAndWait()
for _, m := range watchers {
m.Assert(ctx, 5*time.Minute)
}
return nil
}
}
flow.New(t, "kafka certification").
// Run Kafka using Docker Compose.
Step(dockercompose.Run(clusterName, dockerComposeYAML)).
Step("wait for broker sockets",
network.WaitForAddresses(5*time.Minute, brokers...)).
Step("wait", flow.Sleep(5*time.Second)).
Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
config := sarama.NewConfig()
config.ClientID = "test-consumer"
config.Consumer.Return.Errors = true
// Create new consumer
client, err := sarama.NewConsumer(brokers, config)
if err != nil {
return err
}
defer client.Close()
// Ensure the brokers are ready by attempting to consume
// a topic partition.
_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
return err
})).
Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error {
httpClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true, // test server certificate is not trusted.
},
},
}
resp, err := httpClient.Get(oauthClientQuery)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return fmt.Errorf("oauth client query for 'dapr' not successful")
}
return nil
})).
//
// Run the application logic above.
Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
application(appID1, consumerGroup1))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName1,
embedded.WithComponentsPath("./components/consumer1"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
componentRuntimeOptions(),
)).
//
// Run the second application.
Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
application(appID2, consumerGroup2))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName2,
embedded.WithComponentsPath("./components/mtls-consumer"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
componentRuntimeOptions(),
)).
//
// Send messages using the same metadata/message key so we can expect
// in-order processing.
Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup1, consumerGroup2)).
//
// Run the third application.
Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
application(appID3, consumerGroup2))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName3,
embedded.WithComponentsPath("./components/oauth-consumer"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
componentRuntimeOptions(),
)).
Step("reset", flow.Reset(consumerGroup2)).
//
// Send messages with random keys to test message consumption
// across more than one consumer group and consumers per group.
Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
//
// Gradually stop each broker.
// This tests the component's ability to handle reconnections
// when brokers are shut down cleanly.
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
Step("wait", flow.Sleep(5*time.Second)).
Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
Step("wait", flow.Sleep(5*time.Second)).
//
// Errors will likely start occurring here since quorum is lost.
Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
Step("wait", flow.Sleep(10*time.Second)).
//
// Errors will definitely occur here.
Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
Step("wait", flow.Sleep(30*time.Second)).
Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
//
// Component should recover at this point.
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
//
// Simulate a network interruption.
// This tests the component's ability to handle reconnections
// when Dapr is disconnected abnormally.
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
Step("wait", flow.Sleep(5*time.Second)).
//
// Errors will start occurring here.
Step("interrupt network",
network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
//
// Component should recover at this point.
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
//
// Reset and test that all messages are received during a
// consumer rebalance.
Step("reset", flow.Reset(consumerGroup2)).
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup2)).
Step("wait", flow.Sleep(15*time.Second)).
Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
Step("wait", flow.Sleep(3*time.Second)).
Step("stop app 2", app.Stop(appID2)).
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
Run()
}
func componentRuntimeOptions() []runtime.Option {
log := logger.NewLogger("dapr.components")
bindingsRegistry := bindings_loader.NewRegistry()
bindingsRegistry.Logger = log
bindingsRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
return bindings_kafka.NewKafka(l)
}, "kafka")
bindingsRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
return bindings_kafka.NewKafka(l)
}, "kafka")
return []runtime.Option{
runtime.WithBindings(bindingsRegistry),
}
}
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kafka_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cenkalti/backoff/v4"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"go.uber.org/multierr"
// Pub/Sub.
"github.com/dapr/components-contrib/bindings"
bindings_kafka "github.com/dapr/components-contrib/bindings/kafka"
bindings_loader "github.com/dapr/dapr/pkg/components/bindings"
// Dapr runtime and Go-SDK
"github.com/dapr/dapr/pkg/runtime"
dapr "github.com/dapr/go-sdk/client"
"github.com/dapr/go-sdk/service/common"
"github.com/dapr/kit/logger"
kit_retry "github.com/dapr/kit/retry"
// Certification testing runnables
"github.com/dapr/components-contrib/tests/certification/embedded"
"github.com/dapr/components-contrib/tests/certification/flow"
"github.com/dapr/components-contrib/tests/certification/flow/app"
"github.com/dapr/components-contrib/tests/certification/flow/dockercompose"
"github.com/dapr/components-contrib/tests/certification/flow/network"
"github.com/dapr/components-contrib/tests/certification/flow/retry"
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
"github.com/dapr/components-contrib/tests/certification/flow/simulate"
"github.com/dapr/components-contrib/tests/certification/flow/watcher"
)
const (
sidecarName1 = "dapr-1"
sidecarName2 = "dapr-2"
sidecarName3 = "dapr-3"
appID1 = "app-1"
appID2 = "app-2"
appID3 = "app-3"
clusterName = "kafkacertification"
dockerComposeYAML = "docker-compose.yml"
numMessages = 1000
appPort = 8000
portOffset = 2
messageKey = "partitionKey"
bindingName = "messagebus"
topicName = "neworder"
)
var (
brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
)
func TestKafka_with_retry(t *testing.T) {
// For Kafka, we should ensure messages are received in order.
consumerGroup1 := watcher.NewOrdered()
// This watcher is across multiple consumers in the same group
// so exact ordering is not expected.
consumerGroup2 := watcher.NewUnordered()
// Application logic that tracks messages from a topic.
application := func(appName string, watcher *watcher.Watcher) app.SetupFn {
return func(ctx flow.Context, s common.Service) error {
// Simulate periodic errors.
sim := simulate.PeriodicError(ctx, 100)
// Set up the /orders event handler.
return multierr.Combine(
s.AddBindingInvocationHandler(bindingName, func(_ context.Context, in *common.BindingEvent) (out []byte, err error) {
if err := sim(); err != nil {
return nil, err
}
// Track/Observe the data of the event.
watcher.Observe(string(in.Data))
ctx.Logf("======== %s received event: %s\n", appName, string(in.Data))
return in.Data, nil
}),
)
}
}
// Set the partition key on all messages so they
// are written to the same partition.
// This allows for checking of ordered messages.
metadata := map[string]string{
messageKey: "test",
}
// Test logic that sends messages to a topic and
// verifies the application has received them.
sendRecvTest := func(metadata map[string]string, watchers ...*watcher.Watcher) flow.Runnable {
_, hasKey := metadata[messageKey]
return func(ctx flow.Context) error {
client := sidecar.GetClient(ctx, sidecarName1)
// Declare what is expected BEFORE performing any steps
// that will satisfy the test.
msgs := make([]string, numMessages)
for i := range msgs {
msgs[i] = fmt.Sprintf("Hello, Messages %03d", i)
}
for _, m := range watchers {
m.ExpectStrings(msgs...)
}
// If no key is provided, create a random one.
// For Kafka, this will spread messages across
// the topic's partitions.
if !hasKey {
metadata[messageKey] = uuid.NewString()
}
// Send events that the application above will observe.
ctx.Log("Sending messages!")
for _, msg := range msgs {
ctx.Logf("Sending: %q", msg)
err := client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
Name: bindingName,
Operation: string(bindings.CreateOperation),
Data: []byte(msg),
Metadata: metadata,
})
require.NoError(ctx, err, "error publishing message")
}
// Do the messages we observed match what we expect?
for _, m := range watchers {
m.Assert(ctx, time.Minute)
}
return nil
}
}
// sendMessagesInBackground and assertMessages are
// Runnables for testing publishing and consuming
// messages reliably when infrastructure and network
// interruptions occur.
var task flow.AsyncTask
sendMessagesInBackground := func(watchers ...*watcher.Watcher) flow.Runnable {
return func(ctx flow.Context) error {
client := sidecar.GetClient(ctx, sidecarName1)
for _, m := range watchers {
m.Reset()
}
t := time.NewTicker(100 * time.Millisecond)
defer t.Stop()
counter := 1
for {
select {
case <-task.Done():
return nil
case <-t.C:
msg := fmt.Sprintf("Background message - %03d", counter)
for _, m := range watchers {
m.Prepare(msg) // Track for observation
}
// Publish with retries.
bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), task)
if err := kit_retry.NotifyRecover(func() error {
return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
Name: bindingName,
Operation: string(bindings.CreateOperation),
Data: []byte(msg),
Metadata: metadata,
})
}, bo, func(err error, t time.Duration) {
ctx.Logf("Error output binding message, retrying in %s", t)
}, func() {}); err == nil {
for _, m := range watchers {
m.Add(msg) // Success
}
counter++
} else {
for _, m := range watchers {
m.Remove(msg) // Remove from Tracking
}
}
}
}
}
}
assertMessages := func(watchers ...*watcher.Watcher) flow.Runnable {
return func(ctx flow.Context) error {
// Signal sendMessagesInBackground to stop and wait for it to complete.
task.CancelAndWait()
for _, m := range watchers {
m.Assert(ctx, 5*time.Minute)
}
return nil
}
}
flow.New(t, "kafka certification with retry").
// Run Kafka using Docker Compose.
Step(dockercompose.Run(clusterName, dockerComposeYAML)).
Step("wait for broker sockets",
network.WaitForAddresses(5*time.Minute, brokers...)).
Step("wait", flow.Sleep(5*time.Second)).
Step("wait for kafka readiness", retry.Do(10*time.Second, 30, func(ctx flow.Context) error {
config := sarama.NewConfig()
config.ClientID = "test-consumer"
config.Consumer.Return.Errors = true
// Create new consumer
client, err := sarama.NewConsumer(brokers, config)
if err != nil {
return err
}
defer client.Close()
// Ensure the brokers are ready by attempting to consume
// a topic partition.
_, err = client.ConsumePartition("myTopic", 0, sarama.OffsetOldest)
return err
})).
// Run the application logic above.
Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
application(appID1, consumerGroup1))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName1,
embedded.WithComponentsPath("./components/consumer1"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort),
componentRuntimeOptions(),
)).
//
// Run the second application.
Step(app.Run(appID2, fmt.Sprintf(":%d", appPort+portOffset),
application(appID2, consumerGroup2))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName2,
embedded.WithComponentsPath("./components/consumer2"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset),
componentRuntimeOptions(),
)).
//
// Send messages using the same metadata/message key so we can expect
// in-order processing.
Step("send and wait(in-order)", sendRecvTest(metadata, consumerGroup1, consumerGroup2)).
//
// Run the third application.
Step(app.Run(appID3, fmt.Sprintf(":%d", appPort+portOffset*2),
application(appID3, consumerGroup2))).
//
// Run the Dapr sidecar with the Kafka component.
Step(sidecar.Run(sidecarName3,
embedded.WithComponentsPath("./components/consumer2"),
embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),
embedded.WithProfilePort(runtime.DefaultProfilePort+portOffset*2),
componentRuntimeOptions(),
)).
Step("reset", flow.Reset(consumerGroup2)).
//
// Send messages with random keys to test message consumption
// across more than one consumer group and consumers per group.
Step("send and wait(no-order)", sendRecvTest(map[string]string{}, consumerGroup2)).
//
// Gradually stop each broker.
// This tests the component's ability to handle reconnections
// when brokers are shut down cleanly.
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
Step("wait", flow.Sleep(5*time.Second)).
Step("stop broker 1", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka1")).
Step("wait", flow.Sleep(5*time.Second)).
//
// Errors will likely start occurring here since quorum is lost.
Step("stop broker 2", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka2")).
Step("wait", flow.Sleep(10*time.Second)).
//
// Errors will definitely occur here.
Step("stop broker 3", dockercompose.Stop(clusterName, dockerComposeYAML, "kafka3")).
Step("wait", flow.Sleep(30*time.Second)).
Step("restart broker 3", dockercompose.Start(clusterName, dockerComposeYAML, "kafka3")).
Step("restart broker 2", dockercompose.Start(clusterName, dockerComposeYAML, "kafka2")).
Step("restart broker 1", dockercompose.Start(clusterName, dockerComposeYAML, "kafka1")).
//
// Component should recover at this point.
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(Component reconnect)", assertMessages(consumerGroup1, consumerGroup2)).
//
// Simulate a network interruption.
// This tests the component's ability to handle reconnections
// when Dapr is disconnected abnormally.
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup1, consumerGroup2)).
Step("wait", flow.Sleep(5*time.Second)).
//
// Errors will start occurring here.
Step("interrupt network",
network.InterruptNetwork(30*time.Second, nil, nil, "19092", "29092", "39092")).
//
// Component should recover at this point.
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(network interruption)", assertMessages(consumerGroup1, consumerGroup2)).
//
// Reset and test that all messages are received during a
// consumer rebalance.
Step("reset", flow.Reset(consumerGroup2)).
StepAsync("steady flow of messages to publish", &task,
sendMessagesInBackground(consumerGroup2)).
Step("wait", flow.Sleep(15*time.Second)).
Step("stop sidecar 2", sidecar.Stop(sidecarName2)).
Step("wait", flow.Sleep(3*time.Second)).
Step("stop app 2", app.Stop(appID2)).
Step("wait", flow.Sleep(30*time.Second)).
Step("assert messages(consumer rebalance)", assertMessages(consumerGroup2)).
Run()
}
func componentRuntimeOptions() []runtime.Option {
log := logger.NewLogger("dapr.components")
bindingsRegistry := bindings_loader.NewRegistry()
bindingsRegistry.Logger = log
bindingsRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
return bindings_kafka.NewKafka(l)
}, "kafka")
bindingsRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
return bindings_kafka.NewKafka(l)
}, "kafka")
return []runtime.Option{
runtime.WithBindings(bindingsRegistry),
}
}
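For readability, the publish-with-retry pattern that sendMessagesInBackground inlines can be read as a small helper. This sketch uses only the calls that appear above; publishWithRetry itself is a hypothetical extraction, not part of the test suite:

package kafka_test

import (
	"context"
	"time"

	"github.com/cenkalti/backoff/v4"

	"github.com/dapr/components-contrib/bindings"
	dapr "github.com/dapr/go-sdk/client"
	kit_retry "github.com/dapr/kit/retry"
)

// publishWithRetry invokes the output binding with a constant
// one-second backoff until it succeeds or ctx is cancelled.
func publishWithRetry(ctx context.Context, client dapr.Client, binding, msg string, md map[string]string) error {
	bo := backoff.WithContext(backoff.NewConstantBackOff(time.Second), ctx)
	return kit_retry.NotifyRecover(func() error {
		return client.InvokeOutputBinding(ctx, &dapr.InvokeBindingRequest{
			Name:      binding,
			Operation: string(bindings.CreateOperation),
			Data:      []byte(msg),
			Metadata:  md,
		})
	}, bo, func(err error, d time.Duration) {
		// Notify: called after each failed attempt, before the next retry.
	}, func() {
		// Recovered: called once when a success follows a failure.
	})
}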

View File

@ -1,22 +0,0 @@
serve:
admin:
host: 0.0.0.0
port: 4444
public:
host: 0.0.0.0
port: 4443
tls:
cert:
path: /config/tls/hydra.crt
key:
path: /config/tls/hydra.key
dsn: memory
log:
leak_sensitive_values: true
level: debug
urls:
self:
issuer: https://hydra:4443
strategies:
access_token: opaque

View File

@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDejCCAmKgAwIBAgIUIgMF15XiDisW+e4I+clKWYvxcfMwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjEwMDA1ODAwWhcNMjIxMjEwMDA1ODAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDICbhBmpxFPFtoRTjdiki2ouZQbUoHE4llIQnJz3ta/+gWi/czrOmC3aHz
x9pJ1kifBG5MlbdnH8WCQXx/vPXP5hpTmTDjAp87Fygk2KWdb/bQBrpRTIEgAuK3
IWJ9tYhcDDxSwEF52xNnRkklxZpVRZX1SmcdndEqioaAnxWEM1x+JJcjrk6Ud4dv
aX0G1xw8g6u0KT1I61Aja2OAAj+iPih6RK6xSRdxvELXbehClBHOpJP6sRw03Xw4
HRJEesWqrGAFEp0qSZulKwn2MHAW80VVF/U9hogUQrBVFTKw/5oS9eu+BV2AY3Rh
8DACB0blpEkjIachjjo2A8wuhBeNAgMBAAGjgZ0wgZowDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBRVxfGJ7a+7DBz2PM2w/U5aeJFOfjAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzAbBgNVHREEFDASggVoeWRyYYIJbG9jYWxob3N0MA0GCSqG
SIb3DQEBCwUAA4IBAQA+0zkBNBZ8okLiEl9B4nbfBvQXdkYOl9H9TdDYlWLNKb1S
8Y4SNQ4hrfKspYVIBVvWfuwnphdLeexs4ovU6OkXeVPFPSsjihX9I+sJ3bFCLvkj
lVXY/pJy/Z6QQlPg71LkCiH0Hv2RIvGZ1UtTu12d8BiF3oO8Nnzq4kiyfpPJ5QDR
GsTKmXxEzgCcR+DI4g05hI2BQuq8Xjw4jZzt0IOcWhR2ZxBwfzLQp/hAQK69iPCN
3DfD/eMr1EF8kAWec4eo3CFwHvrPpEdIMeNE7q9fuyfVPJGQZFKNHl7rF4YqYn/F
4XGJxRCjd860JkJDLrmXazED6cLE1IvYPCLUsfK8
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAyAm4QZqcRTxbaEU43YpItqLmUG1KBxOJZSEJyc97Wv/oFov3
M6zpgt2h88faSdZInwRuTJW3Zx/FgkF8f7z1z+YaU5kw4wKfOxcoJNilnW/20Aa6
UUyBIALityFifbWIXAw8UsBBedsTZ0ZJJcWaVUWV9UpnHZ3RKoqGgJ8VhDNcfiSX
I65OlHeHb2l9BtccPIOrtCk9SOtQI2tjgAI/oj4oekSusUkXcbxC123oQpQRzqST
+rEcNN18OB0SRHrFqqxgBRKdKkmbpSsJ9jBwFvNFVRf1PYaIFEKwVRUysP+aEvXr
vgVdgGN0YfAwAgdG5aRJIyGnIY46NgPMLoQXjQIDAQABAoIBAQDEErLmqxOt0aGP
LPq2PEtVqYqzHszG7uFnnOCpTZQN+HSXVQ4zOrOQMIoEF8rhQQbhx0gODVo93KiO
Kn5L/v26kEMR2kBO400McIBKzYhYL1zvPwj1k1Wl+O4crr6JlZxZDS07t3L2bEQy
oHQmb+/80T5RtmIoZ36Ugj+gZ06BytKPY2yZRpLnF/p9V77JK2BT2pg1EXahU5LL
wGhodg+MqFrKPk0TpdQ7edipHEiqprk/sEH9KA4cPfa83LBv6xRcHYBzlA0mHnZo
jgGdptDAFJeJcMLwywF1CvI/x5Y0mAkDN95uFcw8/ozX2pKGuIZYY9BjR444zKm2
8V7Br2gBAoGBAN2n2BlBXTjOgZ7c50fGFA+oR23C90r3AHwnh1FOnCzKOUNbW48F
tsKvmI0DUK+sg+ZkGIEz1ll81FVzCAZQ8sii3LV5qnW7QVhZszHbKWtI9ulcFDqe
ZqKlOahy5GmcGfxbniufrHaBlP+Y1gwJd8NXjoFKNxLLtQ8S25e4QwKNAoGBAOcI
ZH+eaZ3653fFPzuJtsbbfqB5HW6bTLIUqnwNRGghvMP0JTLzYYVlcaLMrI2L50Qf
Z5IEl7+uVeTmRehkoe5J3r5tIifKrVGnQM7inpTfkCOlY2tsAL8/XvQ/6ikBEt2J
r166mOk3RfjuuXuBFrPwfpZ5fMggFa92e5ukWqkBAoGAQ12VsedJu9AXWP7uU8QB
qNiODO/qVKBJR3KED9QCZyJ20N/dLdSgvP69MG5HgXy/AbB+OhZVGRF1Pxsc3z6O
6yeESKtXgTyOGZn5ejePmQmt8TKI+1/U9a2dnnJ8tRQ6WZZGth9rPQEZFa2PsEzY
V0gvCWBS6KV8u74Re0UHKKkCgYB9j8Ae49d+9rgKDfd5wjTGCtDdIjXuwRSDzFuD
pCpDdeKDlRMKh9++gg2qbxZwr1J3YaIGZ9yZXoRsLQJddSPUv+0BDYr8mVhtAjtk
tSF+w6ow1VgdL8uQJT7T/FClDGJWaNgY4cztIw8yZXwFNXlDPjduTISWt2lRvVEc
m8xyAQKBgF+aAk2qJ8/MM4aXoWgjkWiDGvgfVmWsYMpalz34PDP+hzPg3LxaGKsn
jm+LQs9Z/WX26hxZK0HWQbcCsJ81mBvgeXnUrY/T50Zvd7zUFF+1WG7Is9KUlLA1
ceQzJcixurQtuUSkwj2PfVziiufkHk43tuzDQ57carUX6kg2OwAD
-----END RSA PRIVATE KEY-----

View File

@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
-----END CERTIFICATE-----

View File

@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
-----END RSA PRIVATE KEY-----

View File

@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----

View File

@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----


@ -1 +0,0 @@
PLAIN_9092_1://localhost MTLS_9094_1://localhost OAUTH_9093_1://localhost


@ -1 +0,0 @@
PLAIN_9092_1://19092 MTLS_9094_1://19094 OAUTH_9093_1://19093


@ -1 +0,0 @@
PLAIN_9092 MTLS_9094 OAUTH_9093


@ -1,18 +0,0 @@
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n
kafka.root.logger.level=DEBUG
log4j.rootLogger=${kafka.root.logger.level}, CONSOLE
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO
log4j.logger.kafka.request.logger=WARN, CONSOLE
log4j.logger.kafka.network.Processor=OFF
log4j.logger.kafka.server.KafkaApis=OFF
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.controller=TRACE
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka.authorizer.logger=INFO


@ -1,101 +0,0 @@
##############################
##############################
# This file is automatically generated by the Strimzi Cluster Operator
# Any changes to this file will be ignored and overwritten!
##############################
##############################
##########
# Broker ID
##########
broker.id=${STRIMZI_BROKER_ID}
##########
# Zookeeper
##########
zookeeper.connect=zookeeper:2181
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=false
##########
# Kafka message logs configuration
##########
log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}
##########
# Control Plane listener
##########
listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.controlplane-9090.ssl.keystore.type=PKCS12
listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.controlplane-9090.ssl.truststore.type=PKCS12
listener.name.controlplane-9090.ssl.client.auth=required
##########
# Replication listener
##########
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.replication-9091.ssl.keystore.type=PKCS12
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.replication-9091.ssl.truststore.type=PKCS12
listener.name.replication-9091.ssl.client.auth=required
##########
# Listener configuration: MTLS-9094
##########
listener.name.mtls-9094.ssl.client.auth=required
listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.mtls-9094.ssl.truststore.type=PKCS12
listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.mtls-9094.ssl.keystore.type=PKCS12
##########
# Listener configuration: OAUTH-9093
##########
listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12";
listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER
listener.name.oauth-9093.connections.max.reauth.ms=1800000
listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12
listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.oauth-9093.ssl.keystore.type=PKCS12
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
##########
# Common listener configuration
##########
listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:19094,OAUTH-9093://0.0.0.0:19093,PLAIN-9092://0.0.0.0:19092
advertised.listeners=CONTROLPLANE-9090://kafka-1:9090,REPLICATION-9091://kafka-1:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT}
listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT
inter.broker.listener.name=REPLICATION-9091
sasl.enabled.mechanisms=
ssl.secure.random.implementation=SHA1PRNG
ssl.endpoint.identification.algorithm=HTTPS
##########
# Authorization
##########
allow.everyone.if.no.acl.found=true
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
super.users=User:CN=dapr,O=Dapr Test
##########
# User provided configuration
##########
num.partitions=10
auto.create.topics.enable=true
group.initial.rebalance.delay.ms=0
offsets.topic.replication.factor=3
inter.broker.protocol.version=3.0
log.message.format.version=3.0


@ -1 +0,0 @@
PLAIN_9092_2://localhost MTLS_9094_2://localhost OAUTH_9093_2://localhost


@ -1 +0,0 @@
PLAIN_9092_2://29092 MTLS_9094_2://29094 OAUTH_9093_2://29093


@ -1 +0,0 @@
PLAIN_9092 MTLS_9094 OAUTH_9093


@ -1,18 +0,0 @@
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n
kafka.root.logger.level=INFO
log4j.rootLogger=${kafka.root.logger.level}, CONSOLE
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO
log4j.logger.kafka.request.logger=WARN, CONSOLE
log4j.logger.kafka.network.Processor=OFF
log4j.logger.kafka.server.KafkaApis=OFF
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.controller=TRACE
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka.authorizer.logger=INFO


@ -1,101 +0,0 @@
##############################
##############################
# This file is automatically generated by the Strimzi Cluster Operator
# Any changes to this file will be ignored and overwritten!
##############################
##############################
##########
# Broker ID
##########
broker.id=${STRIMZI_BROKER_ID}
##########
# Zookeeper
##########
zookeeper.connect=zookeeper:2181
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=false
##########
# Kafka message logs configuration
##########
log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}
##########
# Control Plane listener
##########
listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.controlplane-9090.ssl.keystore.type=PKCS12
listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.controlplane-9090.ssl.truststore.type=PKCS12
listener.name.controlplane-9090.ssl.client.auth=required
##########
# Replication listener
##########
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.replication-9091.ssl.keystore.type=PKCS12
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.replication-9091.ssl.truststore.type=PKCS12
listener.name.replication-9091.ssl.client.auth=required
##########
# Listener configuration: MTLS-9094
##########
listener.name.mtls-9094.ssl.client.auth=required
listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.mtls-9094.ssl.truststore.type=PKCS12
listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.mtls-9094.ssl.keystore.type=PKCS12
##########
# Listener configuration: OAUTH-9093
##########
listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12";
listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER
listener.name.oauth-9093.connections.max.reauth.ms=1800000
listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12
listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.oauth-9093.ssl.keystore.type=PKCS12
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
##########
# Common listener configuration
##########
listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:29094,OAUTH-9093://0.0.0.0:29093,PLAIN-9092://0.0.0.0:29092
advertised.listeners=CONTROLPLANE-9090://kafka-2:9090,REPLICATION-9091://kafka-2:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT}
listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT
inter.broker.listener.name=REPLICATION-9091
sasl.enabled.mechanisms=
ssl.secure.random.implementation=SHA1PRNG
ssl.endpoint.identification.algorithm=HTTPS
##########
# Authorization
##########
allow.everyone.if.no.acl.found=true
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
super.users=User:CN=dapr,O=Dapr Test
##########
# User provided configuration
##########
num.partitions=10
auto.create.topics.enable=true
group.initial.rebalance.delay.ms=0
offsets.topic.replication.factor=3
inter.broker.protocol.version=3.0
log.message.format.version=3.0


@ -1 +0,0 @@
PLAIN_9092_3://localhost MTLS_9094_3://localhost OAUTH_9093_3://localhost


@ -1 +0,0 @@
PLAIN_9092_3://39092 MTLS_9094_3://39094 OAUTH_9093_3://39093


@ -1 +0,0 @@
PLAIN_9092 MTLS_9094 OAUTH_9093


@ -1,18 +0,0 @@
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n
kafka.root.logger.level=INFO
log4j.rootLogger=${kafka.root.logger.level}, CONSOLE
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO
log4j.logger.kafka.request.logger=WARN, CONSOLE
log4j.logger.kafka.network.Processor=OFF
log4j.logger.kafka.server.KafkaApis=OFF
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.controller=TRACE
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka.authorizer.logger=INFO


@ -1,101 +0,0 @@
##############################
##############################
# This file is automatically generated by the Strimzi Cluster Operator
# Any changes to this file will be ignored and overwritten!
##############################
##############################
##########
# Broker ID
##########
broker.id=${STRIMZI_BROKER_ID}
##########
# Zookeeper
##########
zookeeper.connect=zookeeper:2181
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=false
##########
# Kafka message logs configuration
##########
log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID}
##########
# Control Plane listener
##########
listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.controlplane-9090.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.controlplane-9090.ssl.keystore.type=PKCS12
listener.name.controlplane-9090.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.controlplane-9090.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.controlplane-9090.ssl.truststore.type=PKCS12
listener.name.controlplane-9090.ssl.client.auth=required
##########
# Replication listener
##########
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.replication-9091.ssl.keystore.type=PKCS12
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.replication-9091.ssl.truststore.type=PKCS12
listener.name.replication-9091.ssl.client.auth=required
##########
# Listener configuration: MTLS-9094
##########
listener.name.mtls-9094.ssl.client.auth=required
listener.name.mtls-9094.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
listener.name.mtls-9094.ssl.truststore.password=${CERTS_STORE_PASSWORD}
listener.name.mtls-9094.ssl.truststore.type=PKCS12
listener.name.mtls-9094.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.mtls-9094.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.mtls-9094.ssl.keystore.type=PKCS12
##########
# Listener configuration: OAUTH-9093
##########
listener.name.oauth-9093.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
listener.name.oauth-9093.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub="admin" oauth.client.id="kafka" oauth.valid.issuer.uri="https://hydra:4443/" oauth.introspection.endpoint.uri="https://hydra:4444/oauth2/introspect" oauth.username.claim="sub" oauth.client.secret="dapr-test" oauth.ssl.truststore.location="/tmp/kafka/oauth-oauth-9093.truststore.p12" oauth.ssl.truststore.password="${CERTS_STORE_PASSWORD}" oauth.ssl.truststore.type="PKCS12";
listener.name.oauth-9093.sasl.enabled.mechanisms=OAUTHBEARER
listener.name.oauth-9093.connections.max.reauth.ms=1800000
listener.name.oauth-9093.ssl.keystore.location=/tmp/kafka/custom-oauth-9093.keystore.p12
listener.name.oauth-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD}
listener.name.oauth-9093.ssl.keystore.type=PKCS12
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
##########
# Common listener configuration
##########
listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,MTLS-9094://0.0.0.0:39094,OAUTH-9093://0.0.0.0:39093,PLAIN-9092://0.0.0.0:39092
advertised.listeners=CONTROLPLANE-9090://kafka-3:9090,REPLICATION-9091://kafka-3:9091,MTLS-9094://${STRIMZI_MTLS_9094_ADVERTISED_HOSTNAME}:${STRIMZI_MTLS_9094_ADVERTISED_PORT},OAUTH-9093://${STRIMZI_OAUTH_9093_ADVERTISED_HOSTNAME}:${STRIMZI_OAUTH_9093_ADVERTISED_PORT},PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT}
listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,MTLS-9094:SSL,OAUTH-9093:SASL_SSL,PLAIN-9092:PLAINTEXT
inter.broker.listener.name=REPLICATION-9091
sasl.enabled.mechanisms=
ssl.secure.random.implementation=SHA1PRNG
ssl.endpoint.identification.algorithm=HTTPS
##########
# Authorization
##########
allow.everyone.if.no.acl.found=true
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
super.users=User:CN=dapr,O=Dapr Test
##########
# User provided configuration
##########
num.partitions=10
auto.create.topics.enable=true
group.initial.rebalance.delay.ms=0
offsets.topic.replication.factor=3
inter.broker.protocol.version=3.0
log.message.format.version=3.0


@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----


@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----


@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDmzCCAoOgAwIBAgIUbM8Fssal+HxhavPplrJ1o4Fk/6kwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYzMjAwWhcNMjIxMjA0MTYzMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC7jpeZVmiNi1I91j6Z7Z5W8z3MCuquNjConG2NjxyT7klQYFMAMlQ0j5v9
x5hUQ6ks4JTmCBaI/gPtjDJypCPwQKtr9QIECWjM1tSSOs/lu5p9Fqd30klcivF9
fEpuyui6KpRVobGdg8bZ27Mh4yee1fI1DhAj5ME6Ti3sLmA5uxRYLLPollNICgUs
QME2iJrm30rUmSqbKpB721ULcB7kLTn3PPqMDU3qmXLTTlioN3+hXuC0aSS5c/6f
IwHQ/l2bLApCF9rLc+bkSFBBMOEZD/iomaE7JolHGUt7vEhObSxgnJ6ZH0C+k0Y/
RLdG9cmmrdIP6SHy8UYX4O0UsHxPAgMBAAGjgb4wgbswDgYDVR0PAQH/BAQDAgWg
MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFLpv
SEQ21Za3JzU4rosvyiFHfM5VMB8GA1UdIwQYMBaAFH6X+clU0D49SZ9ezWRglsF8
3glvMEYGA1UdEQQ/MD2CBGRhcHKCCWxvY2FsaG9zdIIHa2Fma2EtMYIHa2Fma2Et
MoIHa2Fma2EtM4IPa2FmYWstYm9vdHN0cmFwMA0GCSqGSIb3DQEBCwUAA4IBAQC1
pNBOCYLN0CA3VZO+Pz+b7XnAUAZFisgH2/eBQBPOU9b4ImCLIASBqAMEJ4uq//MW
IiF9iu1YcncXPP/8rPnEsxKLBVcjWH48PneBcW3qb/9he+xXCnB7fqMUDkggzTeH
4ouZyY5/GnlQYgmFNgOIyI4pydoD8GkJQh88LzEn/YAKi0aVkwBMJ2eb2Fiz05WW
TKqWZKNnOjLPz5fIYNCR+uZtuqADhqItyaBa+X9NVIQ9cPcPMohZS4o+DtrCQevf
6QZuQEYh3IIY8Smi4+2OKUE0Gy2AnEKaEdwxbrCKYhuF/sUrLm76LmIH75HQJxyM
zE20cNgzX3yurenT3tbN
-----END CERTIFICATE-----


@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAu46XmVZojYtSPdY+me2eVvM9zArqrjYwqJxtjY8ck+5JUGBT
ADJUNI+b/ceYVEOpLOCU5ggWiP4D7YwycqQj8ECra/UCBAlozNbUkjrP5buafRan
d9JJXIrxfXxKbsrouiqUVaGxnYPG2duzIeMnntXyNQ4QI+TBOk4t7C5gObsUWCyz
6JZTSAoFLEDBNoia5t9K1JkqmyqQe9tVC3Ae5C059zz6jA1N6ply005YqDd/oV7g
tGkkuXP+nyMB0P5dmywKQhfay3Pm5EhQQTDhGQ/4qJmhOyaJRxlLe7xITm0sYJye
mR9AvpNGP0S3RvXJpq3SD+kh8vFGF+DtFLB8TwIDAQABAoIBAEchmh8eZUKhPwyS
r2VDeBSz5ZD35u8xQB0CTo4sY4M7EPT5wyDE9aiFenyx8PSsQIHznqTrFljYNXcm
/47472RTsm+cGSqcwvFE3JOk2GnhT4L3T4Yi6428aD/WHoiMTd0k/uLHEwyRCJ5h
Mzu74a/cpiI29ioWvK23LrVvFTFvOro3UgJvyK4HUS/bg5gnjqMnh87eIrLhbpNI
0zuoRKcAVIeQWOQ2CvfRAMmijrlp+VLovIqjn/xspvFwCYPMA2ocfjxOC/n6F7/4
jc8+/Q46xYO+3+1svU2cH0ptyxibk24Iqr+yTtMAx7gs4t7hIOYtRMAw4qCMxHyW
/hpc3OECgYEA0hd2q2gnadEOIAcKlWrAV772yXyqHKybezbvZHW/U8jOULLZGnpS
sddCxHE6x8nxmf3YnO3QTYYLTeY9XdlR9Z5Xydu0HzZeGJIxd3wSGZ2hTz4WgbVn
86JpikQBISW2/6T3MKFDsxhbLmivBrVdjVV1/TRM+UG5YL3bb0/wyz8CgYEA5IqK
AoJ+zaMGkt6HD4GM7XxSuhICbCITrV5Obkxh17tLguYuAME/WdGbIISrcS/B41KR
YkJWHMuvGxjW3GF/chcith4k8VDb51Pov1TqvelVDywSOIROUfht6NhtPYajaIGj
GAC5oYOoQpfH7m5ubmbYh1ueb+POfO3hKtIzWvECgYEAkUTwJW2Lczu+zJ6RzudV
wFanRoMRDWq8x+IgfhJ9DW4YWuyP+iMC8z2pSTQSNPuKN7SzBy/ZjQFW57KAVFhk
t7WZdlaYocxyHANaeQgta9D3LVf9MAtDqc9vss97CHSPqQ1kbxfTPA9nXRu9iqH1
4jhpsX9sih3MFPyysrFQCvkCgYEAgTjUUBb5G8zSKrkoJNxbkux42jzUoc+i0KRC
NJt7tz9vstPzrvmVmHOsAvcA+T7HooFNMwHPLvj8SZYB5xo5tYjfV5ozyT6vGF2Z
fJXHJRqJvcptgwdMQYz2mHHHUsKOIskqLqg6TdjjisPHiElop4P/aomjTCDC4GCg
sFWqNAECgYEAzOQT86+Rz9NdVfDo/C/IqsMK9fmwMjGXFaBnVBuKMv1VBrh4Zh3g
E8QrdKwf/pxR/G91O2dBXmqhUKFX+cEgtEvqhdCyVdR4jQhAKE4hsDd2q4psqbB2
BUqaBzo3GawDeKcwCqSPLu7tBKFJCEWjkQZhIVB1lZ8d30i2LSVv2NM=
-----END RSA PRIVATE KEY-----


@ -0,0 +1,19 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagebus
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers
    value: localhost:19092,localhost:29092,localhost:39092
  - name: consumerGroup
    value: kafkaCertification2
  - name: authType
    value: "none"
  - name: initialOffset
    value: oldest
  - name: backOffDuration
    value: 50ms
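
Note: a component like the one above is loaded by the Dapr sidecar under the name messagebus. As a minimal sketch of publishing through it (assuming the Dapr Go SDK, github.com/dapr/go-sdk, and an already-running sidecar; only the messagebus component name and the neworder topic come from this test, the rest is illustrative):

package main

import (
	"context"
	"log"

	dapr "github.com/dapr/go-sdk/client"
)

func main() {
	// Connect to the local Dapr sidecar over gRPC (uses DAPR_GRPC_PORT or the default).
	client, err := dapr.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// "messagebus" is the pubsub component name from the YAML above;
	// "neworder" is the topic the certification test consumes from.
	if err := client.PublishEvent(context.Background(), "messagebus", "neworder", []byte(`{"orderId":1}`)); err != nil {
		log.Fatal(err)
	}
}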


@ -1,92 +0,0 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagebus
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers
    value: localhost:19094,localhost:29094,localhost:39094
  - name: consumerGroup
    value: kafkaCertification2
  - name: initialOffset
    value: oldest
  - name: backOffDuration
    value: 50ms
  - name: authType
    value: mtls
  - name: caCert
    value: |
      -----BEGIN CERTIFICATE-----
      MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
      HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
      IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
      ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
      I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
      NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
      jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
      bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
      b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
      BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
      lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
      A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
      z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
      nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
      rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
      MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
      -----END CERTIFICATE-----
  - name: clientCert
    value: |
      -----BEGIN CERTIFICATE-----
      MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
      HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
      IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
      AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
      cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
      E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
      MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
      27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
      1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
      MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
      A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
      PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
      LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
      AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
      hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
      xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
      G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
      TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
      OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
      -----END CERTIFICATE-----
  - name: clientKey
    value: |
      -----BEGIN RSA PRIVATE KEY-----
      MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
      3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
      eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
      GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
      2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
      ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
      cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
      cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
      jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
      FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
      Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
      tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
      jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
      P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
      hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
      yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
      Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
      GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
      BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
      28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
      qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
      iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
      sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
      vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
      edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
      -----END RSA PRIVATE KEY-----


@ -1,47 +0,0 @@
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagebus
spec:
  type: pubsub.kafka
  version: v1
  metadata:
  - name: brokers
    value: localhost:19093,localhost:29093,localhost:39093
  - name: consumerGroup
    value: kafkaCertification2
  - name: authType
    value: "oidc"
  - name: initialOffset
    value: oldest
  - name: backOffDuration
    value: 50ms
  - name: oidcTokenEndpoint
    value: https://localhost:4443/oauth2/token
  - name: oidcClientID
    value: "dapr"
  - name: oidcClientSecret
    value: "dapr-test"
  - name: oidcScopes
    value: openid,kafka
  - name: caCert
    value: |
      -----BEGIN CERTIFICATE-----
      MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
      BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
      HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
      IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
      ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
      I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
      NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
      jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
      bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
      b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
      BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
      lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
      A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
      z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
      nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
      rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
      MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
      -----END CERTIFICATE-----


@ -1,191 +1,68 @@
version: "3.7"
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.4.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka1:
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
hostname: kafka-1
container_name: kafka-1
read_only: false
entrypoint:
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
user: root
depends_on:
- zookeeper
ports:
- "19094:19094"
- "19093:19093"
- "19092:19092"
volumes:
- type: bind
source: ./strimzi-ca-certs
target: /opt/kafka/cluster-ca-certs
read_only: true
- type: bind
source: ./strimzi-broker-certs
target: /opt/kafka/broker-certs
read_only: true
- type: bind
source: ./strimzi-client-ca
target: /opt/kafka/client-ca-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-mtls-9094-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/oauth-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-kafka1-config
target: /opt/kafka/custom-config
read_only: true
- type: volume
source: kafka1-data
target: /var/lib/kafka/data
environment:
KAFKA_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms128M"
kafka2:
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
hostname: kafka-2
container_name: kafka-2
read_only: false
entrypoint:
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
user: root
depends_on:
- zookeeper
ports:
- "29094:29094"
- "29093:29093"
- "29092:29092"
volumes:
- type: bind
source: ./strimzi-ca-certs
target: /opt/kafka/cluster-ca-certs
read_only: true
- type: bind
source: ./strimzi-broker-certs
target: /opt/kafka/broker-certs
read_only: true
- type: bind
source: ./strimzi-client-ca
target: /opt/kafka/client-ca-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-mtls-9094-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/oauth-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-kafka2-config
target: /opt/kafka/custom-config
read_only: true
- type: volume
source: kafka2-data
target: /var/lib/kafka/data
environment:
KAFKA_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms128M"
kafka3:
image: quay.io/strimzi/kafka:0.26.0-kafka-3.0.0
hostname: kafka-3
container_name: kafka-3
read_only: false
entrypoint:
/bin/bash -c "mkdir -p /var/opt/kafka && chown -R kafka:0 /var/lib/kafka/data /var/opt/kafka && su kafka -p -c '/opt/kafka/kafka_run.sh'"
user: root
depends_on:
- zookeeper
ports:
- "39094:39094"
- "39093:39093"
- "39092:39092"
volumes:
- type: bind
source: ./strimzi-ca-certs
target: /opt/kafka/cluster-ca-certs
read_only: true
- type: bind
source: ./strimzi-broker-certs
target: /opt/kafka/broker-certs
read_only: true
- type: bind
source: ./strimzi-client-ca
target: /opt/kafka/client-ca-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-mtls-9094-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/custom-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-listener-certs
target: /opt/kafka/certificates/oauth-oauth-9093-certs
read_only: true
- type: bind
source: ./strimzi-kafka3-config
target: /opt/kafka/custom-config
read_only: true
- type: volume
source: kafka3-data
target: /var/lib/kafka/data
environment:
KAFKA_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms128M"
hydra:
image: oryd/hydra:v1.10.6-sqlite
hostname: hydra
container_name: hydra
ports:
- "4443:4443"
- "4444:4444"
read_only: false
entrypoint: hydra serve all -c /config/config.yaml --sqa-opt-out
volumes:
- type: bind
source: ./oauth-config
target: /config
read_only: true
hydra-config:
image: oryd/hydra:v1.10.6-sqlite
hostname: hydra-config
container_name: hydra-config
depends_on:
- hydra
entrypoint: |
/bin/sh -c "sleep 20;hydra clients create --skip-tls-verify -g client_credentials --id dapr -n dapr -r token -a openid,kafka --secret dapr-test; hydra clients create --skip-tls-verify -g client_credentials --id kafka -n kafka -r token -a openid --secret dapr-test"
environment:
HYDRA_ADMIN_URL: https://hydra:4444
volumes:
kafka1-data: {}
kafka2-data: {}
kafka3-data: {}
version: "3.7"
services:
zookeeper:
image: confluentinc/cp-zookeeper:7.3.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka1:
image: confluentinc/cp-server:7.3.0
hostname: kafka1
container_name: kafka1
depends_on:
- zookeeper
ports:
- "19092:19092"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092,PLAINTEXT_HOST://localhost:19092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_NUM_PARTITIONS: 10
kafka2:
image: confluentinc/cp-server:7.3.0
hostname: kafka2
container_name: kafka2
depends_on:
- zookeeper
ports:
- "29092:29092"
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_NUM_PARTITIONS: 10
kafka3:
image: confluentinc/cp-server:7.3.0
hostname: kafka3
container_name: kafka3
depends_on:
- zookeeper
ports:
- "39092:39092"
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092,PLAINTEXT_HOST://localhost:39092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_NUM_PARTITIONS: 10
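
Note: the three brokers above expose only PLAINTEXT listeners, so clients need no TLS or SASL setup. A quick connectivity check against them, sketched with the Shopify/sarama client the Kafka component itself builds on (the version pin below is an assumption; any broker-compatible version works):

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V3_0_0_0 // assumed pin matching the broker version

	// Broker addresses come from the compose file above.
	client, err := sarama.NewClient([]string{"localhost:19092", "localhost:29092", "localhost:39092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Metadata fetch succeeded; report how many brokers the cluster knows about.
	fmt.Printf("connected; cluster reports %d brokers\n", len(client.Brokers()))
}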


@ -15,9 +15,7 @@ package kafka_test
import (
	"context"
	"crypto/tls"
	"fmt"
	"net/http"
	"testing"
	"time"
@ -69,10 +67,7 @@ const (
	topicName = "neworder"
)
var (
	brokers          = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
	oauthClientQuery = "https://localhost:4444/clients/dapr"
)
var brokers = []string{"localhost:19092", "localhost:29092", "localhost:39092"}
func TestKafka(t *testing.T) {
	// For Kafka, we should ensure messages are received in order.
@ -242,24 +237,6 @@ func TestKafka(t *testing.T) {
			return err
		})).
		Step("wait for Dapr OAuth client", retry.Do(20*time.Second, 6, func(ctx flow.Context) error {
			httpClient := &http.Client{
				Transport: &http.Transport{
					TLSClientConfig: &tls.Config{
						InsecureSkipVerify: true, // test server certificate is not trusted.
					},
				},
			}
			resp, err := httpClient.Get(oauthClientQuery)
			if err != nil {
				return err
			}
			if resp.StatusCode != 200 {
				return fmt.Errorf("oauth client query for 'dapr' not successful")
			}
			return nil
		})).
		//
		// Run the application logic above.
		Step(app.Run(appID1, fmt.Sprintf(":%d", appPort),
@ -280,7 +257,7 @@ func TestKafka(t *testing.T) {
		//
		// Run the Dapr sidecar with the Kafka component.
		Step(sidecar.Run(sidecarName2,
			embedded.WithComponentsPath("./components/mtls-consumer"),
			embedded.WithComponentsPath("./components/consumer2"),
			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset),
			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset),
			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset),
@ -298,7 +275,7 @@ func TestKafka(t *testing.T) {
		//
		// Run the Dapr sidecar with the Kafka component.
		Step(sidecar.Run(sidecarName3,
			embedded.WithComponentsPath("./components/oauth-consumer"),
			embedded.WithComponentsPath("./components/consumer2"),
			embedded.WithAppProtocol(runtime.HTTPProtocol, appPort+portOffset*2),
			embedded.WithDaprGRPCPort(runtime.DefaultDaprAPIGRPCPort+portOffset*2),
			embedded.WithDaprHTTPPort(runtime.DefaultDaprHTTPPort+portOffset*2),


@ -1,22 +0,0 @@
serve:
  admin:
    host: 0.0.0.0
    port: 4444
  public:
    host: 0.0.0.0
    port: 4443
  tls:
    cert:
      path: /config/tls/hydra.crt
    key:
      path: /config/tls/hydra.key
dsn: memory
log:
  leak_sensitive_values: true
  level: debug
urls:
  self:
    issuer: https://hydra:4443
strategies:
  access_token: opaque


@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDejCCAmKgAwIBAgIUIgMF15XiDisW+e4I+clKWYvxcfMwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjEwMDA1ODAwWhcNMjIxMjEwMDA1ODAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDICbhBmpxFPFtoRTjdiki2ouZQbUoHE4llIQnJz3ta/+gWi/czrOmC3aHz
x9pJ1kifBG5MlbdnH8WCQXx/vPXP5hpTmTDjAp87Fygk2KWdb/bQBrpRTIEgAuK3
IWJ9tYhcDDxSwEF52xNnRkklxZpVRZX1SmcdndEqioaAnxWEM1x+JJcjrk6Ud4dv
aX0G1xw8g6u0KT1I61Aja2OAAj+iPih6RK6xSRdxvELXbehClBHOpJP6sRw03Xw4
HRJEesWqrGAFEp0qSZulKwn2MHAW80VVF/U9hogUQrBVFTKw/5oS9eu+BV2AY3Rh
8DACB0blpEkjIachjjo2A8wuhBeNAgMBAAGjgZ0wgZowDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBRVxfGJ7a+7DBz2PM2w/U5aeJFOfjAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzAbBgNVHREEFDASggVoeWRyYYIJbG9jYWxob3N0MA0GCSqG
SIb3DQEBCwUAA4IBAQA+0zkBNBZ8okLiEl9B4nbfBvQXdkYOl9H9TdDYlWLNKb1S
8Y4SNQ4hrfKspYVIBVvWfuwnphdLeexs4ovU6OkXeVPFPSsjihX9I+sJ3bFCLvkj
lVXY/pJy/Z6QQlPg71LkCiH0Hv2RIvGZ1UtTu12d8BiF3oO8Nnzq4kiyfpPJ5QDR
GsTKmXxEzgCcR+DI4g05hI2BQuq8Xjw4jZzt0IOcWhR2ZxBwfzLQp/hAQK69iPCN
3DfD/eMr1EF8kAWec4eo3CFwHvrPpEdIMeNE7q9fuyfVPJGQZFKNHl7rF4YqYn/F
4XGJxRCjd860JkJDLrmXazED6cLE1IvYPCLUsfK8
-----END CERTIFICATE-----


@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAyAm4QZqcRTxbaEU43YpItqLmUG1KBxOJZSEJyc97Wv/oFov3
M6zpgt2h88faSdZInwRuTJW3Zx/FgkF8f7z1z+YaU5kw4wKfOxcoJNilnW/20Aa6
UUyBIALityFifbWIXAw8UsBBedsTZ0ZJJcWaVUWV9UpnHZ3RKoqGgJ8VhDNcfiSX
I65OlHeHb2l9BtccPIOrtCk9SOtQI2tjgAI/oj4oekSusUkXcbxC123oQpQRzqST
+rEcNN18OB0SRHrFqqxgBRKdKkmbpSsJ9jBwFvNFVRf1PYaIFEKwVRUysP+aEvXr
vgVdgGN0YfAwAgdG5aRJIyGnIY46NgPMLoQXjQIDAQABAoIBAQDEErLmqxOt0aGP
LPq2PEtVqYqzHszG7uFnnOCpTZQN+HSXVQ4zOrOQMIoEF8rhQQbhx0gODVo93KiO
Kn5L/v26kEMR2kBO400McIBKzYhYL1zvPwj1k1Wl+O4crr6JlZxZDS07t3L2bEQy
oHQmb+/80T5RtmIoZ36Ugj+gZ06BytKPY2yZRpLnF/p9V77JK2BT2pg1EXahU5LL
wGhodg+MqFrKPk0TpdQ7edipHEiqprk/sEH9KA4cPfa83LBv6xRcHYBzlA0mHnZo
jgGdptDAFJeJcMLwywF1CvI/x5Y0mAkDN95uFcw8/ozX2pKGuIZYY9BjR444zKm2
8V7Br2gBAoGBAN2n2BlBXTjOgZ7c50fGFA+oR23C90r3AHwnh1FOnCzKOUNbW48F
tsKvmI0DUK+sg+ZkGIEz1ll81FVzCAZQ8sii3LV5qnW7QVhZszHbKWtI9ulcFDqe
ZqKlOahy5GmcGfxbniufrHaBlP+Y1gwJd8NXjoFKNxLLtQ8S25e4QwKNAoGBAOcI
ZH+eaZ3653fFPzuJtsbbfqB5HW6bTLIUqnwNRGghvMP0JTLzYYVlcaLMrI2L50Qf
Z5IEl7+uVeTmRehkoe5J3r5tIifKrVGnQM7inpTfkCOlY2tsAL8/XvQ/6ikBEt2J
r166mOk3RfjuuXuBFrPwfpZ5fMggFa92e5ukWqkBAoGAQ12VsedJu9AXWP7uU8QB
qNiODO/qVKBJR3KED9QCZyJ20N/dLdSgvP69MG5HgXy/AbB+OhZVGRF1Pxsc3z6O
6yeESKtXgTyOGZn5ejePmQmt8TKI+1/U9a2dnnJ8tRQ6WZZGth9rPQEZFa2PsEzY
V0gvCWBS6KV8u74Re0UHKKkCgYB9j8Ae49d+9rgKDfd5wjTGCtDdIjXuwRSDzFuD
pCpDdeKDlRMKh9++gg2qbxZwr1J3YaIGZ9yZXoRsLQJddSPUv+0BDYr8mVhtAjtk
tSF+w6ow1VgdL8uQJT7T/FClDGJWaNgY4cztIw8yZXwFNXlDPjduTISWt2lRvVEc
m8xyAQKBgF+aAk2qJ8/MM4aXoWgjkWiDGvgfVmWsYMpalz34PDP+hzPg3LxaGKsn
jm+LQs9Z/WX26hxZK0HWQbcCsJ81mBvgeXnUrY/T50Zvd7zUFF+1WG7Is9KUlLA1
ceQzJcixurQtuUSkwj2PfVziiufkHk43tuzDQ57carUX6kg2OwAD
-----END RSA PRIVATE KEY-----


@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
-----END CERTIFICATE-----


@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDpTCCAo2gAwIBAgIUTAjabskCLxIqbh2E4MnYIsivipswDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTkwMjAwWhcNMjIxMjA0MTkwMjAwWjAjMRIwEAYDVQQKEwlEYXBy
IFRlc3QxDTALBgNVBAMTBGRhcHIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC5rlhpdzY2RuRRRKevotZnLUx/dh2wLvCMluSxKFJYvC7DXK3cHZh1+6Wo
cdlsEYY3ZQ7Pt/N8DkV7ODqSvFyhJu+1fCY3elMfZcxSw24UJ2aXzlx5RbNhLAI0
E804ugAp3qss4ygCwQ4U2jMGXqeVpi7gyGsYybEUOMSorx5OBgiJAKkaATNMBqdp
MX2FKzBU3owpAcuXhIGSdKblYQuZJmAfITnaJFO4ffLyn9m4I9n/dDfZag/TCZBL
27uIo79mZO99YfhMfdrifH3FkvE/14/JUPhwHAChoCbDol0/V/KDv0tp3vQbQH+7
1dyrAWhszswSXQGgADYm8y74dlQpAgMBAAGjgcgwgcUwDgYDVR0PAQH/BAQDAgWg
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
A1UdDgQWBBQ4eToXZz4AH4YbuW23vy99T8d8OTAfBgNVHSMEGDAWgBR+l/nJVNA+
PUmfXs1kYJbBfN4JbzBGBgNVHREEPzA9ggRkYXBygglsb2NhbGhvc3SCB2thZmth
LTGCB2thZmthLTKCB2thZmthLTOCD2thZmFrLWJvb3RzdHJhcDANBgkqhkiG9w0B
AQsFAAOCAQEAAapIJIdQhGF2qz/N4i/nIwJHGxUapgtVrydC8kw7DeuQi2usG62Y
hGNnBAoJCR0auSQ2P3SWEO19o1doZjFroqFkNIXdTT+aHxLg0k89H203oeMSI43x
xTlmJCjBNw4zQD9jC1c6u/W6WBwN2SJGBZrdmA95KQrz+gan9nh6ecPYeGF89io2
G20dRE2cGwbt7LAImK87M8LXbw/Of28gYMh3L14CNy6oma3izMix9xhUhDVACnVy
TaltjNIiAlFP2g4GIsPSYTMAOeIzIU/LxKlxg8mLg1bTPwb5IZK1wFwPBY5rnNqx
OrycW7rZKfrg2eZml8FnYlzO64u41oC47A==
-----END CERTIFICATE-----


@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAua5YaXc2NkbkUUSnr6LWZy1Mf3YdsC7wjJbksShSWLwuw1yt
3B2YdfulqHHZbBGGN2UOz7fzfA5Fezg6krxcoSbvtXwmN3pTH2XMUsNuFCdml85c
eUWzYSwCNBPNOLoAKd6rLOMoAsEOFNozBl6nlaYu4MhrGMmxFDjEqK8eTgYIiQCp
GgEzTAanaTF9hSswVN6MKQHLl4SBknSm5WELmSZgHyE52iRTuH3y8p/ZuCPZ/3Q3
2WoP0wmQS9u7iKO/ZmTvfWH4TH3a4nx9xZLxP9ePyVD4cBwAoaAmw6JdP1fyg79L
ad70G0B/u9XcqwFobM7MEl0BoAA2JvMu+HZUKQIDAQABAoIBACZz2JNewLdUzwuV
cDSLQGN1mhX7XAKUdfRne0zE0OjXb8e9dbPT3TLxvki36xLaPjVSlFKoAaB7RCBU
cKzanUQyUAoBf9iVWIl0B3BMUIuT7Uca0UO8D33cI0itoR5SRp5lIoXVNP/9AvGG
jnKPP51aIPMkDim/+w/5AaD9QwVdGC2BWNn8bFykz/DfIB0tiVTec8/pWaP7vHGM
FriQbL07Yrj3BE0ndp5cL52ZbH9OmQ/hXUHCk6vCuV/yrqljeLPGbEYkpmhm/fMO
Fa3pX6wR+QgZ5lta870jK52bexyoGWgsMcTTl8+7q4DYM2YREEKicAlbOh92bdm4
tnjIiVECgYEA1btWqCtxWat5tzXeYAowYs/uia/ANbmg+SGqIeVqGn4EyLIBYnmZ
jexfWliLj7Nk802fbNIO9sStMt6q7vvRbYR2ZHFPU0Th9m/XVPdJKJ9qpMkSWdY3
P7VlQuYHSZvU1ny/QtDc8dGoaxluiaJsIBde0UUcwOo/tA66OnP2n7cCgYEA3mbf
hz6W+ThofDPyJN5kFTnx4g+uNA8hnqyJeh9xcnh1t/A5BH4faZBPhokoskahUWis
yI4v6e552CHkF9jo6k397xUb/W/HO0BlKhapf8prdrG4zSE5pr140eTdr10h95SD
Wr4twfEaBNsSXRnaMxAMaVbPKfLuW0+N1Qbk6x8CgYA8EZnKS+Ngk0vzDOXB0jtF
GjFtawK3VsOCIU8ClcqbRX2stjKjbY+VjrBB4Q7gRUgDBXbgC61+90nCOUiLQCTd
BdSMaDgmK/7h1w8K5zEdhKhhRc2tiAIhGqcqBSJZMr2/xnGuoqrmH8mYyB4D+q0u
28KfSDBLm8ppnZYDZaITwwKBgDv76xYDH50gRa4aJJklEkFXW5HpQMbxvdOaHYo+
qM6DBt0RgY9gpQBH1+slW0CaJDBc1x1QnEOv+lT87xQvgMKRPogZXW9Bkq68c4yi
iBzbb5iX3owVBgOe3tNdsxz1NZAdEkCLQrQoXygoHg/WRS+4iGBw9XcO+pLOJibq
sRtpAoGARUL0cfedOtIgGOQTNzfHqQZsRbLEKx64FI6Q8g1womr7lWWXy6RX4BZv
vm41g/PkdiES9ZfaNihRHcEhaNuA26OhiCbXe/FRcyZRX9TeCkuyQgNn9nssPIgR
edWdnN8kZKQ7ReZwMlw2UpXenAwlVoQQbHw9zpkcD2Exmp/TLAk=
-----END RSA PRIVATE KEY-----


@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDJjCCAg6gAwIBAgIUJPqvjfNx6kMf7mE5FtW81+X8HekwDQYJKoZIhvcNAQEL
BQAwKzESMBAGA1UECxMJRGFwciBUZXN0MRUwEwYDVQQDEwxEYXByIFRlc3QgQ0Ew
HhcNMjExMjA0MTYyNzAwWhcNMjYxMjAzMTYyNzAwWjArMRIwEAYDVQQLEwlEYXBy
IFRlc3QxFTATBgNVBAMTDERhcHIgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMPLpsfCUdYf+7RAY7mktcj4/qJJyNroHxS8ChwSeJ0M/dLk
I6G4kyty3TGvzmrdxkr2DW2B+ZmrZFzSVQg+kNESMhEWLJt4MtyGMNuDZcwV5kJL
NPltLYmov2z8hyD2v6agZNyiWM0k2p/dl+Ikp4DJmd08PSd+nhc5Wj9X33gsEAoK
jKptl+XGGvSlC3tIbHmBhRsP42QlLjqk5PWxINbMDePHOiYFmau3VRrbPweKTFuF
bY0Y0w8t1qOFX55hU7LkMEXjLmuUfFUEZvn3NUTvH80gKDioiJTC7NBRE6sCYAlm
b4Vvix3p9Y/yNKbMA5J3chaZdTZfVqAXplZY3jMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH6X+clU0D49SZ9ezWRg
lsF83glvMA0GCSqGSIb3DQEBCwUAA4IBAQAjeaFNxIhWZkDYiwsVP5R2JqFifZbq
A/m9YJypRwA+rUeBLFGuIh4QPFf2fZlskJYmFaDB3aplQGoSIzB1HCC0OAhJM5Ec
z6gm+bhqDfCaWz1HfmpvvQes1l/mUzYx5GfiX202W87CMKMQ+5WSg1IsCPFwYN2w
nZkGKYkh9D9TzIFMfi2b1G+O+BuUUyOAXvT8zcJ17GexRHHdc1Gq+1PgDPDL1Sug
rLHmo+dDTZhIV5D14wvxsNHTTr5tt0aaQw3fJqo6P2HE2dBiqadSYnlwS7BQ9Jxc
MlmFggFubM9/QGQ/hGQYmTp+LSlM5ndaVA80o7+SOQZ2aliuH0fQN3ST
-----END CERTIFICATE-----

Some files were not shown because too many files have changed in this diff.