Some fixes for Redis Lock component (#3044)

Signed-off-by: ItalyPaleAle <43508+ItalyPaleAle@users.noreply.github.com>
This commit is contained in:
Alessandro (Ale) Segala 2023-08-07 16:03:16 -07:00 committed by GitHub
parent f00bfdaeff
commit 94cb843d79
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 115 additions and 133 deletions

View File

@ -221,6 +221,33 @@ func GetServerVersion(c RedisClient) (string, error) {
return "", fmt.Errorf("could not find redis_version in redis info response") return "", fmt.Errorf("could not find redis_version in redis info response")
} }
// GetConnectedSlaves returns the number of slaves connected to the Redis master.
func GetConnectedSlaves(ctx context.Context, c RedisClient) (int, error) {
	const prefix = "connected_slaves:"
	res, err := c.DoRead(ctx, "INFO", "replication")
	if err != nil {
		return 0, err
	}
	// Response example: https://redis.io/commands/info#return-value
	// # Replication\r\nrole:master\r\nconnected_slaves:1\r\n
	info, _ := strconv.Unquote(fmt.Sprintf("%q", res))
	if info == "" {
		return 0, nil
	}
	for _, line := range strings.Split(info, "\r\n") {
		if !strings.HasPrefix(line, prefix) {
			continue
		}
		// Best-effort parse: an unparsable value is reported as zero replicas.
		n, _ := strconv.ParseInt(line[len(prefix):], 10, 32)
		return int(n), nil
	}
	return 0, nil
}
type RedisError string type RedisError string
func (e RedisError) Error() string { return string(e) } func (e RedisError) Error() string { return string(e) }

View File

@ -15,10 +15,9 @@ package redis
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"strconv"
"strings"
"time" "time"
rediscomponent "github.com/dapr/components-contrib/internal/component/redis" rediscomponent "github.com/dapr/components-contrib/internal/component/redis"
@ -27,13 +26,10 @@ import (
"github.com/dapr/kit/logger" "github.com/dapr/kit/logger"
) )
const ( const unlockScript = `local v = redis.call("get",KEYS[1]); if v==false then return -1 end; if v~=ARGV[1] then return -2 else return redis.call("del",KEYS[1]) end`
unlockScript = "local v = redis.call(\"get\",KEYS[1]); if v==false then return -1 end; if v~=ARGV[1] then return -2 else return redis.call(\"del\",KEYS[1]) end"
connectedSlavesReplicas = "connected_slaves:"
infoReplicationDelimiter = "\r\n"
)
// Standalone Redis lock store.Any fail-over related features are not supported,such as Sentinel and Redis Cluster. // Standalone Redis lock store.
// Any fail-over related features are not supported, such as Sentinel and Redis Cluster.
type StandaloneRedisLock struct { type StandaloneRedisLock struct {
client rediscomponent.RedisClient client rediscomponent.RedisClient
clientSettings *rediscomponent.Settings clientSettings *rediscomponent.Settings
@ -52,83 +48,46 @@ func NewStandaloneRedisLock(logger logger.Logger) lock.Store {
} }
// Init StandaloneRedisLock. // Init StandaloneRedisLock.
func (r *StandaloneRedisLock) InitLockStore(ctx context.Context, metadata lock.Metadata) error { func (r *StandaloneRedisLock) InitLockStore(ctx context.Context, metadata lock.Metadata) (err error) {
// must have `redisHost` // Create the client
if metadata.Properties["redisHost"] == "" {
return fmt.Errorf("[standaloneRedisLock]: InitLockStore error. redisHost is empty")
}
// no failover
if needFailover(metadata.Properties) {
return fmt.Errorf("[standaloneRedisLock]: InitLockStore error. Failover is not supported")
}
// construct client
var err error
r.client, r.clientSettings, err = rediscomponent.ParseClientFromProperties(metadata.Properties, contribMetadata.LockStoreType) r.client, r.clientSettings, err = rediscomponent.ParseClientFromProperties(metadata.Properties, contribMetadata.LockStoreType)
if err != nil { if err != nil {
return err return err
} }
// 3. connect to redis
if _, err = r.client.PingResult(ctx); err != nil { // Ensure we have a host
return fmt.Errorf("[standaloneRedisLock]: error connecting to redis at %s: %s", r.clientSettings.Host, err) if r.clientSettings.Host == "" {
return errors.New("metadata property redisHost is empty")
} }
// no replica
replicas, err := r.getConnectedSlaves(ctx) // We do not support failover or having replicas
// pass the validation if error occurs, if r.clientSettings.Failover {
// since some redis versions such as miniredis do not recognize the `INFO` command. return errors.New("this component does not support connecting to Redis with failover")
}
	// Ping Redis to ensure the connection is up
if _, err = r.client.PingResult(ctx); err != nil {
return fmt.Errorf("error connecting to Redis: %v", err)
}
// Ensure there are no replicas
// Pass the validation if error occurs, since some Redis versions such as miniredis do not recognize the `INFO` command.
replicas, err := rediscomponent.GetConnectedSlaves(ctx, r.client)
if err == nil && replicas > 0 { if err == nil && replicas > 0 {
return fmt.Errorf("[standaloneRedisLock]: InitLockStore error. Replication is not supported") return errors.New("replication is not supported")
} }
return nil return nil
} }
func needFailover(properties map[string]string) bool { // TryLock tries to acquire a lock.
if val, ok := properties["failover"]; ok && val != "" { // If the lock cannot be acquired, it returns immediately.
parsedVal, err := strconv.ParseBool(val)
if err != nil {
return false
}
return parsedVal
}
return false
}
func (r *StandaloneRedisLock) getConnectedSlaves(ctx context.Context) (int, error) {
res, err := r.client.DoRead(ctx, "INFO", "replication")
if err != nil {
return 0, err
}
// Response example: https://redis.io/commands/info#return-value
// # Replication\r\nrole:master\r\nconnected_slaves:1\r\n
s, _ := strconv.Unquote(fmt.Sprintf("%q", res))
if len(s) == 0 {
return 0, nil
}
return r.parseConnectedSlaves(s), nil
}
func (r *StandaloneRedisLock) parseConnectedSlaves(res string) int {
infos := strings.Split(res, infoReplicationDelimiter)
for _, info := range infos {
if strings.Contains(info, connectedSlavesReplicas) {
parsedReplicas, _ := strconv.ParseUint(info[len(connectedSlavesReplicas):], 10, 32)
return int(parsedReplicas)
}
}
return 0
}
// Try to acquire a redis lock.
func (r *StandaloneRedisLock) TryLock(ctx context.Context, req *lock.TryLockRequest) (*lock.TryLockResponse, error) { func (r *StandaloneRedisLock) TryLock(ctx context.Context, req *lock.TryLockRequest) (*lock.TryLockResponse, error) {
// 1.Setting redis expiration time // Set a key if doesn't exist with an expiration time
nxval, err := r.client.SetNX(ctx, req.ResourceID, req.LockOwner, time.Second*time.Duration(req.ExpiryInSeconds)) nxval, err := r.client.SetNX(ctx, req.ResourceID, req.LockOwner, time.Second*time.Duration(req.ExpiryInSeconds))
if nxval == nil { if nxval == nil {
return &lock.TryLockResponse{}, fmt.Errorf("[standaloneRedisLock]: SetNX returned nil.ResourceID: %s", req.ResourceID) return &lock.TryLockResponse{}, fmt.Errorf("setNX returned a nil response")
} }
// 2. check error
if err != nil { if err != nil {
return &lock.TryLockResponse{}, err return &lock.TryLockResponse{}, err
} }
@ -138,46 +97,46 @@ func (r *StandaloneRedisLock) TryLock(ctx context.Context, req *lock.TryLockRequ
}, nil }, nil
} }
// Try to release a redis lock. // Unlock tries to release a lock if the lock is still valid.
func (r *StandaloneRedisLock) Unlock(ctx context.Context, req *lock.UnlockRequest) (*lock.UnlockResponse, error) { func (r *StandaloneRedisLock) Unlock(ctx context.Context, req *lock.UnlockRequest) (*lock.UnlockResponse, error) {
// 1. delegate to client.eval lua script // Delegate to client.eval lua script
evalInt, parseErr, err := r.client.EvalInt(ctx, unlockScript, []string{req.ResourceID}, req.LockOwner) evalInt, parseErr, err := r.client.EvalInt(ctx, unlockScript, []string{req.ResourceID}, req.LockOwner)
// 2. check error
if evalInt == nil { if evalInt == nil {
return newInternalErrorUnlockResponse(), fmt.Errorf("[standaloneRedisLock]: Eval unlock script returned nil.ResourceID: %s", req.ResourceID) res := &lock.UnlockResponse{
Status: lock.InternalError,
}
return res, errors.New("eval unlock script returned a nil response")
} }
// 3. parse result
i := *evalInt // Parse result
status := lock.InternalError
if parseErr != nil { if parseErr != nil {
return &lock.UnlockResponse{ return &lock.UnlockResponse{
Status: status, Status: lock.InternalError,
}, err }, err
} }
if i >= 0 { var status lock.Status
switch {
case *evalInt >= 0:
status = lock.Success status = lock.Success
} else if i == -1 { case *evalInt == -1:
status = lock.LockDoesNotExist status = lock.LockDoesNotExist
} else if i == -2 { case *evalInt == -2:
status = lock.LockBelongsToOthers status = lock.LockBelongsToOthers
default:
status = lock.InternalError
} }
return &lock.UnlockResponse{ return &lock.UnlockResponse{
Status: status, Status: status,
}, nil }, nil
} }
func newInternalErrorUnlockResponse() *lock.UnlockResponse {
return &lock.UnlockResponse{
Status: lock.InternalError,
}
}
// Close shuts down the client's redis connections. // Close shuts down the client's redis connections.
func (r *StandaloneRedisLock) Close() error { func (r *StandaloneRedisLock) Close() error {
if r.client != nil { if r.client != nil {
closeErr := r.client.Close() err := r.client.Close()
r.client = nil r.client = nil
return closeErr return err
} }
return nil return nil
} }

View File

@ -15,12 +15,12 @@ package redis
import ( import (
"context" "context"
"sync"
"testing" "testing"
miniredis "github.com/alicebob/miniredis/v2" miniredis "github.com/alicebob/miniredis/v2"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/lock" "github.com/dapr/components-contrib/lock"
"github.com/dapr/components-contrib/metadata" "github.com/dapr/components-contrib/metadata"
@ -84,9 +84,10 @@ func TestStandaloneRedisLock_TryLock(t *testing.T) {
// 0. prepare // 0. prepare
// start redis // start redis
s, err := miniredis.Run() s, err := miniredis.Run()
assert.NoError(t, err) require.NoError(t, err)
defer s.Close() defer s.Close()
// construct component
// Construct component
comp := NewStandaloneRedisLock(logger.NewLogger("test")).(*StandaloneRedisLock) comp := NewStandaloneRedisLock(logger.NewLogger("test")).(*StandaloneRedisLock)
defer comp.Close() defer comp.Close()
@ -95,9 +96,11 @@ func TestStandaloneRedisLock_TryLock(t *testing.T) {
}} }}
cfg.Properties["redisHost"] = s.Addr() cfg.Properties["redisHost"] = s.Addr()
cfg.Properties["redisPassword"] = "" cfg.Properties["redisPassword"] = ""
// init
// Init
err = comp.InitLockStore(context.Background(), cfg) err = comp.InitLockStore(context.Background(), cfg)
assert.NoError(t, err) require.NoError(t, err)
// 1. client1 trylock // 1. client1 trylock
ownerID1 := uuid.New().String() ownerID1 := uuid.New().String()
resp, err := comp.TryLock(context.Background(), &lock.TryLockRequest{ resp, err := comp.TryLock(context.Background(), &lock.TryLockRequest{
@ -105,49 +108,42 @@ func TestStandaloneRedisLock_TryLock(t *testing.T) {
LockOwner: ownerID1, LockOwner: ownerID1,
ExpiryInSeconds: 10, ExpiryInSeconds: 10,
}) })
assert.NoError(t, err) require.NoError(t, err)
assert.True(t, resp.Success) assert.True(t, resp.Success)
var wg sync.WaitGroup
wg.Add(1)
// 2. Client2 tryLock fail // 2. Client2 tryLock fail
go func() { owner2 := uuid.New().String()
owner2 := uuid.New().String() resp, err = comp.TryLock(context.Background(), &lock.TryLockRequest{
resp2, err2 := comp.TryLock(context.Background(), &lock.TryLockRequest{ ResourceID: resourceID,
ResourceID: resourceID, LockOwner: owner2,
LockOwner: owner2, ExpiryInSeconds: 10,
ExpiryInSeconds: 10, })
}) require.NoError(t, err)
assert.NoError(t, err2) assert.False(t, resp.Success)
assert.False(t, resp2.Success)
wg.Done() // 3. Client 1 unlock
}()
wg.Wait()
// 3. client 1 unlock
unlockResp, err := comp.Unlock(context.Background(), &lock.UnlockRequest{ unlockResp, err := comp.Unlock(context.Background(), &lock.UnlockRequest{
ResourceID: resourceID, ResourceID: resourceID,
LockOwner: ownerID1, LockOwner: ownerID1,
}) })
assert.NoError(t, err) require.NoError(t, err)
assert.True(t, unlockResp.Status == 0, "client1 failed to unlock!") assert.True(t, unlockResp.Status == 0, "client1 failed to unlock!")
// 4. client 2 get lock
wg.Add(1) // 4. Client 2 get lock
go func() { owner2 = uuid.New().String()
owner2 := uuid.New().String() resp2, err2 := comp.TryLock(context.Background(), &lock.TryLockRequest{
resp2, err2 := comp.TryLock(context.Background(), &lock.TryLockRequest{ ResourceID: resourceID,
ResourceID: resourceID, LockOwner: owner2,
LockOwner: owner2, ExpiryInSeconds: 10,
ExpiryInSeconds: 10, })
}) require.NoError(t, err2)
assert.NoError(t, err2) assert.True(t, resp2.Success, "client2 failed to get lock?!")
assert.True(t, resp2.Success, "client2 failed to get lock?!")
// 5. client2 unlock // 5. client2 unlock
unlockResp, err := comp.Unlock(context.Background(), &lock.UnlockRequest{ unlockResp, err = comp.Unlock(context.Background(), &lock.UnlockRequest{
ResourceID: resourceID, ResourceID: resourceID,
LockOwner: owner2, LockOwner: owner2,
}) })
assert.NoError(t, err) require.NoError(t, err)
assert.True(t, unlockResp.Status == 0, "client2 failed to unlock!") assert.True(t, unlockResp.Status == 0, "client2 failed to unlock!")
wg.Done()
}()
wg.Wait()
} }