Add metadata analyzer tools (#2696)
Signed-off-by: Bernd Verst <github@bernd.dev>
Signed-off-by: ItalyPaleAle <43508+ItalyPaleAle@users.noreply.github.com>
Co-authored-by: Alessandro (Ale) Segala <43508+ItalyPaleAle@users.noreply.github.com>

Parent: 99219b6731
Commit: e5cbb34990
@@ -58,7 +58,7 @@ var bundleComponentMetadataCmd = &cobra.Command{
    for _, component := range list {
        componentMetadata, err := parser.LoadForComponent(component)
        if err != nil {
-           panic(fmt.Errorf("Failed to load metadata for component %s: %w", component, err))
+           panic(fmt.Errorf("failed to load metadata for component %s: %w", component, err))
        }
        if componentMetadata == nil {
            fmt.Fprintln(os.Stderr, "Info: metadata file not found in component "+component)
@@ -73,7 +73,7 @@ var bundleComponentMetadataCmd = &cobra.Command{
    enc.SetIndent("", " ")
    err = enc.Encode(bundle)
    if err != nil {
-       panic(fmt.Errorf("Failed to encode bundle to JSON: %w", err))
+       panic(fmt.Errorf("failed to encode bundle to JSON: %w", err))
    }
},
}
@@ -0,0 +1,47 @@
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
    "github.com/spf13/cobra"

    "github.com/dapr/components-contrib/build-tools/pkg/metadataanalyzer"
)

// generateMetadataAnalyzerAppCmd generates the go program file to analyze component metadata
var generateMetadataAnalyzerAppCmd = &cobra.Command{
    Use:   "generate-metadata-analyzer-app",
    Short: "Generates the component metadata analyzer app",
    Long:  `Generates the component metadata analyzer app as a go program file, and outputs it to a standard file location.`,
    Run: func(cmd *cobra.Command, args []string) {
        // Navigate to the root of the repo
        err := cwdToRepoRoot()
        if err != nil {
            panic(err)
        }

        outputfile, _ := cmd.Flags().GetString("outputfile")
        if outputfile == "" {
            panic("flag outputfile is required")
        }

        metadataanalyzer.GenerateMetadataAnalyzer("./", ComponentFolders, outputfile)
    },
}

func init() {
    generateMetadataAnalyzerAppCmd.PersistentFlags().String("outputfile", "", "The output file for the generated go program.")
    rootCmd.AddCommand(generateMetadataAnalyzerAppCmd)
}
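The Run handler above calls cwdToRepoRoot, a helper that lives elsewhere in the build-tools cmd package and is not part of this diff. A rough, hypothetical sketch of what such a helper could look like, assuming the root is recognized by the presence of a go.mod file (the real implementation may differ):

package cmd

import (
    "errors"
    "os"
    "path/filepath"
)

// Hypothetical sketch only: walk upwards from the current working directory
// until a directory containing go.mod is found, then chdir into it.
func cwdToRepoRoot() error {
    dir, err := os.Getwd()
    if err != nil {
        return err
    }
    for {
        if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
            return os.Chdir(dir)
        }
        parent := filepath.Dir(dir)
        if parent == dir {
            return errors.New("could not locate the repository root")
        }
        dir = parent
    }
}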
@@ -0,0 +1,109 @@
package main

import (
    "fmt"
    "os"
    "encoding/json"
    "strings"

    "gopkg.in/yaml.v2"

    "github.com/dapr/kit/logger"
{{range $fullpkg, $val := .Pkgs}}
    {{index $val 0}} "{{print "github.com/dapr/components-contrib/" $fullpkg}}"
{{end}}
)

func main() {
    if len(os.Args) < 2 {
        fmt.Println("Please provide the path to the components-contrib root as an argument")
        os.Exit(1)
    }
    basePath := os.Args[1]
    log := logger.NewLogger("metadata")

    var yamlMetadata *map[string]string
    var missing map[string]string
    missingByComponent := make(map[string]map[string]string)

{{range $fullpkg, $val := .Pkgs}}
    instanceOf_{{index $val 0}} := {{index $val 0}}.{{index $val 1}}(log)
    metadataFor_{{index $val 0}} := instanceOf_{{index $val 0}}.GetComponentMetadata()
    yamlMetadata = getYamlMetadata(basePath, "{{$fullpkg}}")
    missing = checkMissingMetadata(yamlMetadata, metadataFor_{{index $val 0}})
    if len(missing) > 0 {
        missingByComponent["{{$fullpkg}}"] = missing
    }
{{end}}

    jsonData, err := json.MarshalIndent(missingByComponent, "", " ")
    if err != nil {
        fmt.Println(err)
        return
    }
    if len(missingByComponent) > 0 {
        fmt.Println("The following components are missing metadata in their metadata.yaml:\n")
        fmt.Println(string(jsonData))
        os.Exit(1)
    }
}

type Data struct {
    Metadata               []Metadata `yaml:"metadata"`
    AuthenticationProfiles []struct {
        Metadata []Metadata `yaml:"metadata"`
    } `yaml:"authenticationProfiles"`
}

type Metadata struct {
    Name string `yaml:"name"`
    Type string `yaml:"type"`
}

func getYamlMetadata(basePath string, pkg string) *map[string]string {
    metadatayamlpath := basePath + "/" + pkg + "/metadata.yaml"
    data, err := os.ReadFile(metadatayamlpath)
    if err != nil {
        return nil
    }

    var d Data
    err = yaml.Unmarshal(data, &d)
    if err != nil {
        fmt.Println(fmt.Errorf("Invalid metadata yaml format. Error unmarshalling yaml %s: %s", metadatayamlpath, err.Error()))
        os.Exit(1)
    }

    names := make(map[string]string)
    for _, m := range d.Metadata {
        names[strings.ToLower(m.Name)] = "string"
        if m.Type != "" {
            names[strings.ToLower(m.Name)] = m.Type
        }
    }
    for _, ap := range d.AuthenticationProfiles {
        for _, m := range ap.Metadata {
            names[strings.ToLower(m.Name)] = "string"
            if m.Type != "" {
                names[strings.ToLower(m.Name)] = m.Type
            }
        }
    }
    return &names
}

func checkMissingMetadata(yamlMetadataP *map[string]string, componentMetadata map[string]string) map[string]string {
    missingMetadata := make(map[string]string)
    // if there is no yaml metadata, then we are not missing anything yet
    if yamlMetadataP != nil {
        yamlMetadata := *yamlMetadataP
        for key := range componentMetadata {
            lowerKey := strings.ToLower(key)
            if _, ok := yamlMetadata[lowerKey]; !ok {
                missingMetadata[lowerKey] = componentMetadata[key]
            }
            // todo - check if the metadata is the same data type
        }
    }
    return missingMetadata
}
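The heart of the generated program is the comparison in checkMissingMetadata: every key a component reports through GetComponentMetadata must be declared (case-insensitively) in that component's metadata.yaml. A small, self-contained sketch of the same comparison, using made-up field names, illustrates the check:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Keys declared in a hypothetical metadata.yaml (already lower-cased by the analyzer).
    yamlMetadata := map[string]string{"connectionstring": "string", "timeoutinseconds": "duration"}
    // Keys reported by a hypothetical component's GetComponentMetadata().
    componentMetadata := map[string]string{"ConnectionString": "string", "MaxRetries": "int"}

    missing := make(map[string]string)
    for key, typ := range componentMetadata {
        if _, ok := yamlMetadata[strings.ToLower(key)]; !ok {
            missing[strings.ToLower(key)] = typ
        }
    }
    fmt.Println(missing) // prints map[maxretries:int]: MaxRetries is not documented in the YAML
}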
@@ -0,0 +1,148 @@
package metadataanalyzer

import (
    "errors"
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "io/fs"
    "log"
    "os"
    "path/filepath"
    "strings"
    "text/template"
)

func GenerateMetadataAnalyzer(contribRoot string, componentFolders []string, outputfile string) {
    fset := token.NewFileSet()
    pkgs := make(map[string]string)

    err := filepath.WalkDir(contribRoot, func(path string, file fs.DirEntry, err error) error {
        if err != nil {
            return err
        }

        if file.IsDir() {
            if file.Name() == "vendor" || file.Name() == "tests" || file.Name() == "internal" {
                return fs.SkipDir
            }
            return nil
        }

        if filepath.Ext(path) != ".go" {
            return nil
        }

        componentType := ""
        packageName := ""
        skip := true
        dir := filepath.Dir(path)
        for dir != "." && !strings.HasSuffix(dir, "components-contrib") {
            if !skip {
                packageName = filepath.Base(dir) + "/" + packageName
            } else {
                packageName = filepath.Base(dir)
            }
            skip = false
            dir = filepath.Dir(dir)

            curFolder := filepath.Base(dir)

            for _, val := range componentFolders {
                if curFolder == val {
                    componentType = curFolder
                }
            }
        }

        parsedFile, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
        if err != nil {
            log.Printf("could not parse %s: %v", path, err)
            return nil
        }

        var method string
        var methodFinderErr error
        methodFound := false

        switch componentType {
        // Only the component types listed here implement the GetComponentMetadata method today
        case "secretstores":
            method, methodFinderErr = getConstructorMethod("secretstores.SecretStore", parsedFile)
            if methodFinderErr == nil {
                methodFound = true
            }
        case "state":
            method, methodFinderErr = getConstructorMethod("state.Store", parsedFile)
            if methodFinderErr == nil {
                methodFound = true
            }
        }

        if methodFound {
            pkgs[packageName] = method
        }

        return nil
    })
    if err != nil {
        log.Fatal(err)
    }

    data := make(map[string][]string)

    for fullpkg, method := range pkgs {
        sanitizedPkg := strings.ReplaceAll(strings.ReplaceAll(fullpkg, "/", "_"), "-", "_")
        data[fullpkg] = []string{sanitizedPkg, method}
    }

    templateData := struct {
        Pkgs map[string][]string
    }{
        Pkgs: data,
    }

    // let's try loading the template
    bytes, fileErr := os.ReadFile(".build-tools/pkg/metadataanalyzer/analyzer.template")
    tmpl := string(bytes)
    if fileErr != nil {
        log.Fatal(fileErr)
    }

    f, err := os.Create(outputfile)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    t := template.Must(template.New("tmpl").Parse(tmpl))
    err = t.Execute(f, templateData)
    if err != nil {
        panic(err)
    }
}

func getConstructorMethod(componentType string, file *ast.File) (string, error) {
    typeSplit := strings.Split(componentType, ".")
    if len(typeSplit) != 2 {
        return "", fmt.Errorf("invalid component type: %s", componentType)
    }

    for _, d := range file.Decls {
        if f, ok := d.(*ast.FuncDecl); ok {
            if f.Type.Results != nil && len(f.Type.Results.List) > 0 {
                if selExpr, ok := f.Type.Results.List[0].Type.(*ast.SelectorExpr); ok {
                    xIdent, ok := selExpr.X.(*ast.Ident)
                    if !ok || xIdent.Name != typeSplit[0] {
                        continue
                    }
                    if selExpr.Sel.Name == typeSplit[1] {
                        return f.Name.Name, nil
                    }
                }
            }
        }
    }
    return "", errors.New("could not find constructor method")
}
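getConstructorMethod walks a parsed file's top-level declarations and returns the name of the first function whose first result type is the requested selector (secretstores.SecretStore or state.Store). A hypothetical component file that the analyzer would match looks like this; the package and names are illustrative, not part of the commit:

package mystore

import (
    "github.com/dapr/components-contrib/state"
    "github.com/dapr/kit/logger"
)

// NewMyStore is matched because its first return type is the selector
// expression state.Store; the analyzer records the function name "NewMyStore".
func NewMyStore(log logger.Logger) state.Store {
    return nil // a real component returns its state.Store implementation
}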
@@ -56,6 +56,9 @@ jobs:
      - name: Check components-schema
        if: steps.skip_check.outputs.should_skip != 'true'
        run: make check-component-metadata-schema-diff
+     - name: Check components metadata yaml up to date
+       if: steps.skip_check.outputs.should_skip != 'true'
+       run: make check-component-metadata
      - name: Run golangci-lint
        if: steps.skip_check.outputs.should_skip != 'true'
        uses: golangci/golangci-lint-action@v3.4.0
@@ -8,3 +8,4 @@ go.work.sum
.DS_Store
component-metadata-bundle.json
*.log
+metadataanalyzer
Makefile (16 lines changed)
@@ -216,6 +216,22 @@ check-component-metadata-schema-diff: component-metadata-schema
bundle-component-metadata:
	$(RUN_BUILD_TOOLS) bundle-component-metadata > ../component-metadata-bundle.json

+################################################################################
+# Component metadata check                                                    #
+################################################################################
+.PHONY: check-component-metadata
+check-component-metadata:
+	mkdir -p metadataanalyzer
+	$(RUN_BUILD_TOOLS) generate-metadata-analyzer-app --outputfile ./metadataanalyzer/main.go
+	cd metadataanalyzer && \
+	go mod init metadataanalyzer && \
+	go get "github.com/dapr/components-contrib@master" && \
+	go mod edit -replace "github.com/dapr/components-contrib"="../" && \
+	go mod tidy && \
+	go build . && \
+	rm ./go.mod && rm ./go.sum && rm ./main.go && \
+	./metadataanalyzer ../
+
################################################################################
# Prettier                                                                     #
################################################################################
@@ -15,7 +15,6 @@ package postgresql

import (
    "fmt"
-   "strconv"
    "time"

    "github.com/dapr/components-contrib/metadata"
@@ -39,8 +38,8 @@ type postgresMetadataStruct struct {
    TableName         string // Could be in the format "schema.table" or just "table"
    MetadataTableName string // Could be in the format "schema.table" or just "table"

-   timeout         time.Duration
-   cleanupInterval *time.Duration
+   Timeout         time.Duration  `mapstructure:"timeoutInSeconds"`
+   CleanupInterval *time.Duration `mapstructure:"cleanupIntervalInSeconds"`
}

func (m *postgresMetadataStruct) InitWithMetadata(meta state.Metadata) error {
@@ -48,8 +47,8 @@ func (m *postgresMetadataStruct) InitWithMetadata(meta state.Metadata) error {
    m.ConnectionString = ""
    m.TableName = defaultTableName
    m.MetadataTableName = defaultMetadataTableName
-   m.cleanupInterval = ptr.Of(defaultCleanupInternal * time.Second)
-   m.timeout = defaultTimeout * time.Second
+   m.CleanupInterval = ptr.Of(defaultCleanupInternal * time.Second)
+   m.Timeout = defaultTimeout * time.Second

    // Decode the metadata
    err := metadata.DecodeMetadata(meta.Properties, &m)
@@ -63,32 +62,20 @@ func (m *postgresMetadataStruct) InitWithMetadata(meta state.Metadata) error {
    }

    // Timeout
-   s, ok := meta.Properties[timeoutKey]
-   if ok && s != "" {
-       timeoutInSec, err := strconv.ParseInt(s, 10, 0)
-       if err != nil {
-           return fmt.Errorf("invalid value for '%s': %s", timeoutKey, s)
-       }
-       if timeoutInSec < 1 {
-           return fmt.Errorf("invalid value for '%s': must be greater than 0", timeoutKey)
-       }
-
-       m.timeout = time.Duration(timeoutInSec) * time.Second
+   if m.Timeout < 1*time.Second {
+       return fmt.Errorf("invalid value for '%s': must be greater than 0", timeoutKey)
    }

    // Cleanup interval
-   s, ok = meta.Properties[cleanupIntervalKey]
-   if ok && s != "" {
-       cleanupIntervalInSec, err := strconv.ParseInt(s, 10, 0)
-       if err != nil {
-           return fmt.Errorf("invalid value for '%s': %s", cleanupIntervalKey, s)
-       }
-
-       // Non-positive value from meta means disable auto cleanup.
-       if cleanupIntervalInSec > 0 {
-           m.cleanupInterval = ptr.Of(time.Duration(cleanupIntervalInSec) * time.Second)
-       } else {
-           m.cleanupInterval = nil
+   if m.CleanupInterval != nil {
+       // Non-positive value from meta means disable auto cleanup.
+       if *m.CleanupInterval <= 0 {
+           if meta.Properties[cleanupIntervalKey] == "" {
+               // unfortunately the mapstructure decoder decodes an empty string to 0, a missing key would be nil however
+               m.CleanupInterval = ptr.Of(defaultCleanupInternal * time.Second)
+           } else {
+               m.CleanupInterval = nil
+           }
        }
    }

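With the fields now exported and tagged for mapstructure, the manual strconv parsing above is no longer needed: metadata.DecodeMetadata fills them directly from the component properties, and a bare number is read as seconds thanks to the decode-hook change further down in this commit. A rough, self-contained sketch of that behavior, assuming DecodeMetadata keeps the shape used above and substituting an illustrative struct for the real postgresMetadataStruct:

package main

import (
    "fmt"
    "time"

    "github.com/dapr/components-contrib/metadata"
)

// Illustrative struct mirroring the newly tagged fields.
type cfg struct {
    Timeout         time.Duration  `mapstructure:"timeoutInSeconds"`
    CleanupInterval *time.Duration `mapstructure:"cleanupIntervalInSeconds"`
}

func main() {
    props := map[string]string{
        "timeoutInSeconds":         "30",
        "cleanupIntervalInSeconds": "3600",
    }
    var c cfg
    if err := metadata.DecodeMetadata(props, &c); err != nil {
        panic(err)
    }
    fmt.Println(c.Timeout) // expected: 30s
    if c.CleanupInterval != nil {
        fmt.Println(*c.CleanupInterval) // expected: 1h0m0s
    }
}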
@@ -74,7 +74,7 @@ func TestMetadata(t *testing.T) {
        err := m.InitWithMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
        assert.NoError(t, err)
-       assert.Equal(t, defaultTimeout*time.Second, m.timeout)
+       assert.Equal(t, defaultTimeout*time.Second, m.Timeout)
    })

    t.Run("invalid timeout", func(t *testing.T) {
@@ -97,7 +97,7 @@ func TestMetadata(t *testing.T) {
        err := m.InitWithMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
        assert.NoError(t, err)
-       assert.Equal(t, 42*time.Second, m.timeout)
+       assert.Equal(t, 42*time.Second, m.Timeout)
    })

    t.Run("zero timeout", func(t *testing.T) {
@@ -119,8 +119,8 @@ func TestMetadata(t *testing.T) {
        err := m.InitWithMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
        assert.NoError(t, err)
-       _ = assert.NotNil(t, m.cleanupInterval) &&
-           assert.Equal(t, defaultCleanupInternal*time.Second, *m.cleanupInterval)
+       _ = assert.NotNil(t, m.CleanupInterval) &&
+           assert.Equal(t, defaultCleanupInternal*time.Second, *m.CleanupInterval)
    })

    t.Run("invalid cleanupIntervalInSeconds", func(t *testing.T) {
@@ -143,8 +143,8 @@ func TestMetadata(t *testing.T) {
        err := m.InitWithMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
        assert.NoError(t, err)
-       _ = assert.NotNil(t, m.cleanupInterval) &&
-           assert.Equal(t, 42*time.Second, *m.cleanupInterval)
+       _ = assert.NotNil(t, m.CleanupInterval) &&
+           assert.Equal(t, 42*time.Second, *m.CleanupInterval)
    })

    t.Run("zero cleanupIntervalInSeconds", func(t *testing.T) {
@@ -156,6 +156,6 @@ func TestMetadata(t *testing.T) {
        err := m.InitWithMetadata(state.Metadata{Base: metadata.Base{Properties: props}})
        assert.NoError(t, err)
-       assert.Nil(t, m.cleanupInterval)
+       assert.Nil(t, m.CleanupInterval)
    })
}
@@ -94,7 +94,7 @@ func (p *PostgresDBAccess) Init(ctx context.Context, meta state.Metadata) error
        config.MaxConnIdleTime = p.metadata.ConnectionMaxIdleTime
    }

-   connCtx, connCancel := context.WithTimeout(ctx, p.metadata.timeout)
+   connCtx, connCancel := context.WithTimeout(ctx, p.metadata.Timeout)
    p.db, err = pgxpool.NewWithConfig(connCtx, config)
    connCancel()
    if err != nil {
@@ -103,7 +103,7 @@ func (p *PostgresDBAccess) Init(ctx context.Context, meta state.Metadata) error
        return err
    }

-   pingCtx, pingCancel := context.WithTimeout(ctx, p.metadata.timeout)
+   pingCtx, pingCancel := context.WithTimeout(ctx, p.metadata.Timeout)
    err = p.db.Ping(pingCtx)
    pingCancel()
    if err != nil {
@@ -120,7 +120,7 @@ func (p *PostgresDBAccess) Init(ctx context.Context, meta state.Metadata) error
        return err
    }

-   if p.metadata.cleanupInterval != nil {
+   if p.metadata.CleanupInterval != nil {
        gc, err := internalsql.ScheduleGarbageCollector(internalsql.GCOptions{
            Logger: p.logger,
            UpdateLastCleanupQuery: fmt.Sprintf(
@@ -135,7 +135,7 @@ func (p *PostgresDBAccess) Init(ctx context.Context, meta state.Metadata) error
                `DELETE FROM %s WHERE expiredate IS NOT NULL AND expiredate < CURRENT_TIMESTAMP`,
                p.metadata.TableName,
            ),
-           CleanupInterval: *p.metadata.cleanupInterval,
+           CleanupInterval: *p.metadata.CleanupInterval,
            DBPgx: p.db,
        })
        if err != nil {
@@ -249,7 +249,7 @@ func (p *PostgresDBAccess) BulkSet(parentCtx context.Context, req []state.SetReq
        }
    }

-   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.timeout)
+   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.Timeout)
    err = tx.Commit(ctx)
    cancel()
    if err != nil {
@@ -369,7 +369,7 @@ func (p *PostgresDBAccess) BulkDelete(parentCtx context.Context, req []state.Del
        }
    }

-   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.timeout)
+   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.Timeout)
    err = tx.Commit(ctx)
    cancel()
    if err != nil {
@@ -417,7 +417,7 @@ func (p *PostgresDBAccess) ExecuteMulti(parentCtx context.Context, request *stat
        }
    }

-   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.timeout)
+   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.Timeout)
    err = tx.Commit(ctx)
    cancel()
    if err != nil {
@@ -474,7 +474,7 @@ func (p *PostgresDBAccess) Close() error {
// GetCleanupInterval returns the cleanupInterval property.
// This is primarily used for tests.
func (p *PostgresDBAccess) GetCleanupInterval() *time.Duration {
-   return p.metadata.cleanupInterval
+   return p.metadata.CleanupInterval
}

// Returns the set requests.
@@ -507,7 +507,7 @@ func getDelete(req state.TransactionalStateOperation) (state.DeleteRequest, erro

// Internal function that begins a transaction.
func (p *PostgresDBAccess) beginTx(parentCtx context.Context) (pgx.Tx, error) {
-   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.timeout)
+   ctx, cancel := context.WithTimeout(parentCtx, p.metadata.Timeout)
    tx, err := p.db.Begin(ctx)
    cancel()
    if err != nil {
@@ -520,7 +520,7 @@ func (p *PostgresDBAccess) beginTx(parentCtx context.Context) (pgx.Tx, error) {
// Normally called as a deferred function in methods that use transactions.
// In case of errors, they are logged but not actioned upon.
func (p *PostgresDBAccess) rollbackTx(parentCtx context.Context, tx pgx.Tx, methodName string) {
-   rollbackCtx, rollbackCancel := context.WithTimeout(parentCtx, p.metadata.timeout)
+   rollbackCtx, rollbackCancel := context.WithTimeout(parentCtx, p.metadata.Timeout)
    rollbackErr := tx.Rollback(rollbackCtx)
    rollbackCancel()
    if rollbackErr != nil && !errors.Is(rollbackErr, pgx.ErrTxClosed) {
@@ -83,7 +83,12 @@ func toTimeDurationHookFunc() mapstructure.DecodeHookFunc {
                var err error
                val, err = time.ParseDuration(data.(string))
                if err != nil {
-                   return nil, err
+                   // If we can't parse the duration, try parsing it as int64 seconds
+                   seconds, errParse := strconv.ParseInt(data.(string), 10, 0)
+                   if errParse != nil {
+                       return nil, errors.Join(err, errParse)
+                   }
+                   val = time.Duration(seconds * int64(time.Second))
                }
            }
            if t != reflect.TypeOf(Duration{}) {
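This fallback means a value such as "17" (no unit) now decodes to 17 seconds instead of failing. A minimal, self-contained sketch of the same idea, wiring an equivalent hook directly into github.com/mitchellh/mapstructure; the names here are illustrative, not the contrib implementation:

package main

import (
    "fmt"
    "reflect"
    "strconv"
    "time"

    "github.com/mitchellh/mapstructure"
)

// durationHook mirrors the fallback added above: try time.ParseDuration first
// ("6m", "10s", ...), then fall back to reading a bare number as seconds.
func durationHook() mapstructure.DecodeHookFuncType {
    return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
        if f.Kind() != reflect.String || t != reflect.TypeOf(time.Duration(0)) {
            return data, nil
        }
        s := data.(string)
        if d, err := time.ParseDuration(s); err == nil {
            return d, nil
        }
        seconds, err := strconv.ParseInt(s, 10, 0)
        if err != nil {
            return nil, err
        }
        return time.Duration(seconds) * time.Second, nil
    }
}

func main() {
    type config struct {
        Interval time.Duration `mapstructure:"interval"`
        Timeout  time.Duration `mapstructure:"timeoutInSeconds"`
    }
    var out config
    dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
        DecodeHook: durationHook(),
        Result:     &out,
    })
    if err != nil {
        panic(err)
    }
    if err := dec.Decode(map[string]string{"interval": "6m", "timeoutInSeconds": "17"}); err != nil {
        panic(err)
    }
    fmt.Println(out.Interval, out.Timeout) // expected: 6m0s 17s
}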
@@ -103,6 +103,7 @@ func TestMetadataDecode(t *testing.T) {
            Myfloat64 float64 `json:"myfloat64,string"`
            Mybool    *bool   `json:"mybool,omitempty"`
            MyRegularDuration time.Duration `json:"myregularduration"`
+           MyDurationWithoutUnit time.Duration `json:"mydurationwithoutunit"`
            MyRegularDurationEmpty time.Duration `json:"myregulardurationempty"`

            MyRegularDurationDefaultValueUnset time.Duration `json:"myregulardurationdefaultvalueunset"`
@@ -120,6 +121,7 @@ func TestMetadataDecode(t *testing.T) {
            "myfloat64": "1.1",
            "mybool":    "true",
            "myregularduration": "6m",
+           "mydurationwithoutunit": "17",
            "myregulardurationempty": "",
            // Not setting myregulardurationdefaultvalueunset on purpose
            "myregulardurationdefaultvalueempty": "",
@@ -134,6 +136,7 @@ func TestMetadataDecode(t *testing.T) {
        assert.Equal(t, 1.1, m.Myfloat64)
        assert.Equal(t, Duration{Duration: 3 * time.Second}, m.Myduration)
        assert.Equal(t, 6*time.Minute, m.MyRegularDuration)
+       assert.Equal(t, time.Second*17, m.MyDurationWithoutUnit)
        assert.Equal(t, time.Duration(0), m.MyRegularDurationEmpty)
        assert.Equal(t, time.Hour, m.MyRegularDurationDefaultValueUnset)
        assert.Equal(t, time.Duration(0), m.MyRegularDurationDefaultValueEmpty)
@@ -18,76 +18,105 @@ capabilities:
  - query
authenticationProfiles:
  - title: "Master key"
-    description: "Authenticate using a pre-shared \"master key\"."
+    description: |
+      Authenticate using a pre-shared "master key".
    metadata:
      - name: masterKey
        required: true
        sensitive: true
-        description: "The key to authenticate to the Cosmos DB account."
+        description: |
+          The key to authenticate to the Cosmos DB account.
        example: '"my-secret-key"'
  - title: "Azure AD: Managed identity"
-    description: "Authenticate using Azure AD and a managed identity."
+    description: |
+      Authenticate using Azure AD and a managed identity.
    metadata:
      - name: azureClientId
        required: false
-        description: "Client ID (application ID). Required if the service has multiple identities assigned."
+        description: |
+          Client ID (application ID). Required if the service has multiple identities assigned.
        example: '"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"'
  - title: "Azure AD: Client credentials"
-    description: "Authenticate using Azure AD with client credentials, also known as \"service principals\"."
+    description: |
+      Authenticate using Azure AD with client credentials, also known as "service principals".
    metadata:
      - name: azureTenantId
        required: true
-        description: "ID of the Azure AD tenant"
+        description: |
+          ID of the Azure AD tenant
        example: '"cd4b2887-304c-47e1-b4d5-65447fdd542b"'
      - name: azureClientId
        required: true
-        description: "Client ID (application ID)"
+        description: |
+          Client ID (application ID)
        example: '"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"'
      - name: azureClientSecret
        required: true
        sensitive: true
-        description: "Client secret (application password)"
+        description: |
+          Client secret (application password)
        example: '"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"'
  - title: "Azure AD: Client certificate"
-    description: "Authenticate using Azure AD with a client certificate (in PFX/PKCS#12 format). One of azureCertificate and azureCertificateFile is required."
+    description: |
+      Authenticate using Azure AD with a client certificate (in PFX/PKCS#12 format). One of `azureCertificate` and `azureCertificateFile` is required.
    metadata:
      - name: azureTenantId
        required: true
-        description: "ID of the Azure AD tenant."
+        description: |
+          ID of the Azure AD tenant.
        example: '"cd4b2887-304c-47e1-b4d5-65447fdd542b"'
      - name: azureClientId
        required: true
-        description: "Client ID (application ID)."
+        description: |
+          Client ID (application ID).
        example: '"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"'
      - name: azureCertificate
        required: false
        sensitive: true
-        description: "Certificate and private key (in PFX/PKCS#12 format)."
+        description: |
+          Certificate and private key (in PFX/PKCS#12 format).
        example: |
-          "-----BEGIN PRIVATE KEY-----\n MIIEvgI... \n -----END PRIVATE KEY----- \n -----BEGIN CERTIFICATE----- \n MIICoTC... \n -----END CERTIFICATE-----"
+          -----BEGIN PRIVATE KEY-----
+          MIIEvgI...
+          -----END PRIVATE KEY-----
+          -----BEGIN CERTIFICATE-----
+          MIICoTC...
+          -----END CERTIFICATE-----
      - name: azureCertificateFile
        required: false
        sensitive: true
-        description: "Path to PFX/PKCS#12 file on disk, containing the certificate and private key."
+        description: |
+          Path to PFX/PKCS#12 file on disk, containing the certificate and private key.
        example: "/path/to/file.pem"
      - name: azureCertificatePassword
        required: false
        sensitive: true
-        description: "Password for the certificate if encrypted."
+        description: |
+          Password for the certificate if encrypted.
        example: "password"
metadata:
  - name: url
    required: true
-    description: "The Cosmos DB url."
+    description: |
+      The Cosmos DB url.
    example: '"https://******.documents.azure.com:443/"'
    type: string
  - name: database
    required: true
-    description: "The name of the database."
+    description: |
+      The name of the database.
    example: '"db"'
    type: string
  - name: collection
    required: true
-    description: "The name of the collection (container)."
+    description: |
+      The name of the collection (container).
    example: '"collection"'
    type: string
+  - name: contenttype
+    required: false
+    description: |
+      The default content type of the data.
+    example: "application/json"
+    default: "application/json"
+    type: string
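These metadata.yaml declarations are exactly what the generated analyzer reads through getYamlMetadata: it collects the name (and optional type) of every entry, including entries nested under authenticationProfiles. A small, self-contained sketch of that parsing step, using a trimmed, hypothetical document rather than a real component file:

package main

import (
    "fmt"
    "strings"

    "gopkg.in/yaml.v2"
)

// Same shapes as the Data and Metadata structs in the analyzer template above.
type Metadata struct {
    Name string `yaml:"name"`
    Type string `yaml:"type"`
}

type Data struct {
    Metadata               []Metadata `yaml:"metadata"`
    AuthenticationProfiles []struct {
        Metadata []Metadata `yaml:"metadata"`
    } `yaml:"authenticationProfiles"`
}

func main() {
    // A trimmed, hypothetical slice of a metadata.yaml file.
    doc := `
authenticationProfiles:
  - title: "Master key"
    metadata:
      - name: masterKey
metadata:
  - name: url
  - name: contenttype
    type: string
`
    var d Data
    if err := yaml.Unmarshal([]byte(doc), &d); err != nil {
        panic(err)
    }
    names := make(map[string]string)
    for _, m := range d.Metadata {
        names[strings.ToLower(m.Name)] = "string"
        if m.Type != "" {
            names[strings.ToLower(m.Name)] = m.Type
        }
    }
    for _, ap := range d.AuthenticationProfiles {
        for _, m := range ap.Metadata {
            names[strings.ToLower(m.Name)] = "string"
        }
    }
    fmt.Println(names) // prints: map[contenttype:string masterkey:string url:string]
}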
@@ -21,33 +21,47 @@ metadata:
  - name: server
    # Required if host is not set
    required: false
-    description: "The server to connect to, when using DNS SRV record. One of \"server\" and \"host\" is required."
+    description: |
+      The server to connect to, when using DNS SRV record. One of `server` and `host` is required.
    example: '"server.example.com"'
  - name: host
    # Required if server is not set
    required: false
-    description: "The host to connect to. One of \"server\" and \"host\" is required."
+    description: |
+      The host to connect to. One of `server` and `host` is required.
    example: '"mongo-mongodb.default.svc.cluster.local:27017"'
+  - name: connectionstring
+    # Required if host and server are not set
+    required: false
+    description: |
+      The connection string to use. One of `server` or `host` or `connectionstring` is required.
+    example: '"mongodb://localhost:27017"'
  - name: username
-    description: "The username of the user to connect with (applicable in conjunction with \"host\")"
+    description: |
+      The username of the user to connect with (applicable in conjunction with `host`)
    example: '"admin"'
  - name: password
    sensitive: true
-    description: "The password of the user (applicable in conjunction with \"host\")"
+    description: |
+      The password of the user (applicable in conjunction with `host`)
    example: '"password"'
  - name: databaseName
-    description: "The name of the database to use."
+    description: |
+      The name of the database to use.
    default: '"daprStore"'
    example: '"daprStore"'
  - name: collectionName
-    description: "The name of the collection to use."
+    description: |
+      The name of the collection to use.
    default: '"daprCollection"'
    example: '"daprCollection"'
  - name: writeconcern
-    description: "The write concern to use"
+    description: |
+      The write concern to use
    example: '"majority", "2"'
  - name: readconcern
-    description: "The read concern to use"
+    description: |
+      The read concern to use
    type: string
    allowedValues:
      - "available"
@@ -57,10 +71,12 @@ metadata:
      - "snapshot"
    example: '"local"'
  - name: operationTimeout
-    description: "The timeout for the operation."
+    description: |
+      The timeout for the operation.
    type: duration
    default: '"5s"'
    example: '"10s"'
  - name: params
-    description: "Additional parameters to use when connecting. The params field accepts a query string that specifies connection specific options as \"<name>=<value>\" pairs, separated by \"&\" and prefixed with \"?\". See the MongoDB manual for the list of available options and their use cases."
+    description: |
+      Additional parameters to use when connecting. The params field accepts a query string that specifies connection specific options as `<name>=<value>` pairs, separated by `&` and prefixed with `?`. See the MongoDB manual for the list of available options and their use cases.
    example: '"?authSource=daprStore&ssl=true"'