diff --git a/cmd/rocsp-tool/client_test.go b/cmd/rocsp-tool/client_test.go
index 5b966d1ca..7b3a1188b 100644
--- a/cmd/rocsp-tool/client_test.go
+++ b/cmd/rocsp-tool/client_test.go
@@ -7,7 +7,6 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
@@ -18,6 +17,7 @@ import (
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
+ "github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
)
diff --git a/cmd/shell.go b/cmd/shell.go
index 373bb0229..d4fc5fa99 100644
--- a/cmd/shell.go
+++ b/cmd/shell.go
@@ -21,11 +21,11 @@ import (
"time"
"github.com/go-logr/stdr"
- "github.com/go-redis/redis/v8"
"github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/redis/go-redis/v9"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
diff --git a/go.mod b/go.mod
index f976be546..d9ea8499c 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,6 @@ require (
github.com/aws/smithy-go v1.14.1
github.com/eggsampler/acme/v3 v3.4.0
github.com/go-logr/stdr v1.2.2
- github.com/go-redis/redis/v8 v8.11.5
github.com/go-sql-driver/mysql v1.5.0
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/google/certificate-transparency-go v1.1.6
@@ -24,6 +23,7 @@ require (
github.com/miekg/pkcs11 v1.1.1
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_model v0.4.0
+ github.com/redis/go-redis/v9 v9.1.0
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
github.com/weppos/publicsuffix-go v0.30.1-0.20230620154423-38c92ad2d5c6
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300
@@ -66,6 +66,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
diff --git a/go.sum b/go.sum
index f5968b7e0..d767772f0 100644
--- a/go.sum
+++ b/go.sum
@@ -90,6 +90,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0=
+github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
@@ -137,6 +139,7 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -155,8 +158,6 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
-github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -292,10 +293,7 @@ github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1
github.com/mreiferson/go-httpclient v0.0.0-20201222173833-5e475fde3a4d/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
@@ -324,6 +322,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
+github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -534,6 +534,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -755,7 +756,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/ratelimits/source_redis.go b/ratelimits/source_redis.go
index 88de91ea1..1dc01587b 100644
--- a/ratelimits/source_redis.go
+++ b/ratelimits/source_redis.go
@@ -8,9 +8,9 @@ import (
bredis "github.com/letsencrypt/boulder/redis"
- "github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/redis/go-redis/v9"
)
// Compile-time check that RedisSource implements the source interface.
diff --git a/ratelimits/source_redis_test.go b/ratelimits/source_redis_test.go
index 5328d378e..2f0e5d97d 100644
--- a/ratelimits/source_redis_test.go
+++ b/ratelimits/source_redis_test.go
@@ -9,8 +9,8 @@ import (
"github.com/letsencrypt/boulder/test"
"golang.org/x/net/context"
- "github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
+ "github.com/redis/go-redis/v9"
)
func newTestRedisSource(clk clock.FakeClock, addrs map[string]string) *RedisSource {
diff --git a/redis/metrics.go b/redis/metrics.go
index df5e3364a..1438e76ff 100644
--- a/redis/metrics.go
+++ b/redis/metrics.go
@@ -1,8 +1,8 @@
package redis
import (
- "github.com/go-redis/redis/v8"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/redis/go-redis/v9"
)
// An interface satisfied by *redis.ClusterClient and also by a mock in our tests.
diff --git a/redis/metrics_test.go b/redis/metrics_test.go
index 5d34117dc..180f55148 100644
--- a/redis/metrics_test.go
+++ b/redis/metrics_test.go
@@ -4,9 +4,9 @@ import (
"strings"
"testing"
- "github.com/go-redis/redis/v8"
"github.com/letsencrypt/boulder/metrics"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/redis/go-redis/v9"
)
type mockPoolStatGetter struct{}
diff --git a/rocsp/config/rocsp_config.go b/rocsp/config/rocsp_config.go
index 0492175c7..c91069b41 100644
--- a/rocsp/config/rocsp_config.go
+++ b/rocsp/config/rocsp_config.go
@@ -8,9 +8,9 @@ import (
"fmt"
"strings"
- "github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/cmd"
@@ -92,6 +92,7 @@ type RedisConfig struct {
// Default is 1 minute. -1 disables idle connections reaper,
// but idle connections are still discarded by the client
// if IdleTimeout is set.
+ // Deprecated: This field no longer has any effect and will be removed.
IdleCheckFrequency config.Duration `validate:"-"`
}
@@ -120,12 +121,11 @@ func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*
ReadTimeout: c.ReadTimeout.Duration,
WriteTimeout: c.WriteTimeout.Duration,
- PoolSize: c.PoolSize,
- MinIdleConns: c.MinIdleConns,
- MaxConnAge: c.MaxConnAge.Duration,
- PoolTimeout: c.PoolTimeout.Duration,
- IdleTimeout: c.IdleTimeout.Duration,
- IdleCheckFrequency: c.IdleCheckFrequency.Duration,
+ PoolSize: c.PoolSize,
+ MinIdleConns: c.MinIdleConns,
+ ConnMaxLifetime: c.MaxConnAge.Duration,
+ PoolTimeout: c.PoolTimeout.Duration,
+ ConnMaxIdleTime: c.IdleTimeout.Duration,
})
return rocsp.NewWritingClient(rdb, c.Timeout.Duration, clk, stats), nil
}
@@ -160,12 +160,11 @@ func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer
DialTimeout: c.DialTimeout.Duration,
ReadTimeout: c.ReadTimeout.Duration,
- PoolSize: c.PoolSize,
- MinIdleConns: c.MinIdleConns,
- MaxConnAge: c.MaxConnAge.Duration,
- PoolTimeout: c.PoolTimeout.Duration,
- IdleTimeout: c.IdleTimeout.Duration,
- IdleCheckFrequency: c.IdleCheckFrequency.Duration,
+ PoolSize: c.PoolSize,
+ MinIdleConns: c.MinIdleConns,
+ ConnMaxLifetime: c.MaxConnAge.Duration,
+ PoolTimeout: c.PoolTimeout.Duration,
+ ConnMaxIdleTime: c.IdleTimeout.Duration,
})
return rocsp.NewReadingClient(rdb, c.Timeout.Duration, clk, stats), nil
}
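
For reference, a minimal sketch (not taken from this diff) of how the renamed pool options line up when constructing a v9 client: v8's MaxConnAge becomes ConnMaxLifetime, v8's IdleTimeout becomes ConnMaxIdleTime, and IdleCheckFrequency has no v9 counterpart. The plain *redis.Client, address, and durations below are illustrative assumptions, not Boulder's configuration.

```go
package main

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:         "localhost:6379",
		PoolSize:     100,
		MinIdleConns: 10,
		// v8: MaxConnAge
		ConnMaxLifetime: 30 * time.Minute,
		// v8: IdleTimeout
		ConnMaxIdleTime: 5 * time.Minute,
		// v8's IdleCheckFrequency has no v9 equivalent.
	})
	defer rdb.Close()

	// Commands take a context, as they already did in v8.
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
}
```
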
diff --git a/rocsp/rocsp.go b/rocsp/rocsp.go
index fbc2789b0..f4bef1b38 100644
--- a/rocsp/rocsp.go
+++ b/rocsp/rocsp.go
@@ -10,9 +10,9 @@ import (
"github.com/letsencrypt/boulder/core"
bredis "github.com/letsencrypt/boulder/redis"
- "github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
)
diff --git a/rocsp/rocsp_test.go b/rocsp/rocsp_test.go
index 4039da79d..8120aaf21 100644
--- a/rocsp/rocsp_test.go
+++ b/rocsp/rocsp_test.go
@@ -8,10 +8,10 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/metrics"
+ "github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
)
diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
deleted file mode 100644
index b975a7b4c..000000000
--- a/vendor/github.com/go-redis/redis/v8/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.rdb
-testdata/*/
-.idea/
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
deleted file mode 100644
index 195e51933..000000000
--- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
+++ /dev/null
@@ -1,177 +0,0 @@
-## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
-
-
-### Bug Fixes
-
-* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
-* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
-* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
-* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
-* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
-* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
-* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
-* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
-* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
-
-
-### Features
-
-* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
-* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
-* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
-* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
-* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
-* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
-* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
-
-
-
-## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
-
-
-### Features
-
-* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
-* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
-* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
-
-
-
-## v8.11
-
-- Remove OpenTelemetry metrics.
-- Supports more redis commands and options.
-
-## v8.10
-
-- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
- single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
- decision:
-
- - Traces become smaller and less noisy.
- - It may be costly to process those 3 extra spans for each query.
- - go-redis no longer depends on OpenTelemetry.
-
- Eventually we hope to replace the information that we no longer collect with OpenTelemetry
- Metrics.
-
-## v8.9
-
-- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
- `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
-
-## v8.8
-
-- To make updating easier, extra modules now have the same version as go-redis does. That means that
- you need to update your imports:
-
-```
-github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
-github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
-```
-
-## v8.5
-
-- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
- struct:
-
-```go
-err := rdb.HGetAll(ctx, "hash").Scan(&data)
-
-err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
-```
-
-- Please check [redismock](https://github.com/go-redis/redismock) by
- [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
-
-## v8
-
-- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
- using `context.Context` yet, the simplest option is to define global package variable
- `var ctx = context.TODO()` and use it when `ctx` is required.
-
-- Full support for `context.Context` canceling.
-
-- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
-
-- Added `redisext.OpenTemetryHook` that adds
- [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
-
-- Redis slow log support.
-
-- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
- existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
-
-```go
-import "github.com/golang/groupcache/consistenthash"
-
-ring := redis.NewRing(&redis.RingOptions{
- NewConsistentHash: func() {
- return consistenthash.New(100, crc32.ChecksumIEEE)
- },
-})
-```
-
-- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
-- `Options.MaxRetries` default value is changed from 0 to 3.
-
-- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
-
-## v7.3
-
-- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection
- URL contains username.
-
-## v7.2
-
-- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
-
-## v7.1
-
-- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
- interface.
-
-## v7
-
-- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
- transactional pipeline.
-- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
-- WithContext now can not be used to create a shallow copy of the client.
-- New methods ProcessContext, DoContext, and ExecContext.
-- Client respects Context.Deadline when setting net.Conn deadline.
-- Client listens on Context.Done while waiting for a connection from the pool and returns an error
- when context context is cancelled.
-- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
- detecting reconnections.
-- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
- the time.
-- `SetLimiter` is removed and added `Options.Limiter` instead.
-- `HMSet` is deprecated as of Redis v4.
-
-## v6.15
-
-- Cluster and Ring pipelines process commands for each node in its own goroutine.
-
-## 6.14
-
-- Added Options.MinIdleConns.
-- Added Options.MaxConnAge.
-- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
-- Add Client.Do to simplify creating custom commands.
-- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
-- Lower memory usage.
-
-## v6.13
-
-- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
- `HashReplicas = 1000` for better keys distribution between shards.
-- Cluster client was optimized to use much less memory when reloading cluster state.
-- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout
- occurres. In most cases it is recommended to use PubSub.Channel instead.
-- Dialer.KeepAlive is set to 5 minutes by default.
-
-## v6.12
-
-- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis
- Servers that don't have cluster mode enabled. See
- https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
deleted file mode 100644
index a4cfe0576..000000000
--- a/vendor/github.com/go-redis/redis/v8/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-test: testdeps
- go test ./...
- go test ./... -short -race
- go test ./... -run=NONE -bench=. -benchmem
- env GOOS=linux GOARCH=386 go test ./...
- go vet
-
-testdeps: testdata/redis/src/redis-server
-
-bench: testdeps
- go test ./... -test.run=NONE -test.bench=. -test.benchmem
-
-.PHONY: all test testdeps bench
-
-testdata/redis:
- mkdir -p $@
- wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
-
-testdata/redis/src/redis-server: testdata/redis
- cd $< && make all
-
-fmt:
- gofmt -w -s ./
- goimports -w -local github.com/go-redis/redis ./
-
-go_mod_tidy:
- go get -u && go mod tidy
- set -e; for dir in $(PACKAGE_DIRS); do \
- echo "go mod tidy in $${dir}"; \
- (cd "$${dir}" && \
- go get -u && \
- go mod tidy); \
- done
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
deleted file mode 100644
index f3b6a018c..000000000
--- a/vendor/github.com/go-redis/redis/v8/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# Redis client for Go
-
-
-[](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-[](https://redis.uptrace.dev/)
-
-go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
-Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
-OpenTelemetry and ClickHouse. Give it a star as well!
-
-## Resources
-
-- [Discussions](https://github.com/go-redis/redis/discussions)
-- [Documentation](https://redis.uptrace.dev)
-- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
-- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
-
-Other projects you may like:
-
-- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
-- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
-
-## Ecosystem
-
-- [Redis Mock](https://github.com/go-redis/redismock)
-- [Distributed Locks](https://github.com/bsm/redislock)
-- [Redis Cache](https://github.com/go-redis/cache)
-- [Rate limiting](https://github.com/go-redis/redis_rate)
-
-## Features
-
-- Redis 3 commands except QUIT, MONITOR, and SYNC.
-- Automatic connection pooling with
- [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
-- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
-- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
- [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
-- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
-- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
-- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
-- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
- without using cluster mode and Redis Sentinel.
-- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
-
-## Installation
-
-go-redis supports 2 last Go versions and requires a Go version with
-[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
-module:
-
-```shell
-go mod init github.com/my/repo
-```
-
-And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
-
-```shell
-go get github.com/go-redis/redis/v8
-```
-
-## Quickstart
-
-```go
-import (
- "context"
- "github.com/go-redis/redis/v8"
- "fmt"
-)
-
-var ctx = context.Background()
-
-func ExampleClient() {
- rdb := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
-
- err := rdb.Set(ctx, "key", "value", 0).Err()
- if err != nil {
- panic(err)
- }
-
- val, err := rdb.Get(ctx, "key").Result()
- if err != nil {
- panic(err)
- }
- fmt.Println("key", val)
-
- val2, err := rdb.Get(ctx, "key2").Result()
- if err == redis.Nil {
- fmt.Println("key2 does not exist")
- } else if err != nil {
- panic(err)
- } else {
- fmt.Println("key2", val2)
- }
- // Output: key value
- // key2 does not exist
-}
-```
-
-## Look and feel
-
-Some corner cases:
-
-```go
-// SET key value EX 10 NX
-set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
-
-// SET key value keepttl NX
-set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
-
-// SORT list LIMIT 0 2 ASC
-vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
-
-// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
-vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
-}).Result()
-
-// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
-vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
- Keys: []string{"zset1", "zset2"},
- Weights: []int64{2, 3}
-}).Result()
-
-// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
-vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
-
-// custom command
-res, err := rdb.Do(ctx, "set", "key", "value").Result()
-```
-
-## Run the test
-
-go-redis will start a redis-server and run the test cases.
-
-The paths of redis-server bin file and redis config file are defined in `main_test.go`:
-
-```
-var (
- redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
- redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
-)
-```
-
-For local testing, you can change the variables to refer to your local files, or create a soft link
-to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
-
-```
-ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
-cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
-```
-
-Lastly, run:
-
-```
-go test
-```
-
-## Contributors
-
-Thanks to all the people who already contributed!
-
-
-
-
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
deleted file mode 100644
index 4bb12a85b..000000000
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ /dev/null
@@ -1,3478 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hscan"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type Cmder interface {
- Name() string
- FullName() string
- Args() []interface{}
- String() string
- stringArg(int) string
- firstKeyPos() int8
- SetFirstKeyPos(int8)
-
- readTimeout() *time.Duration
- readReply(rd *proto.Reader) error
-
- SetErr(error)
- Err() error
-}
-
-func setCmdsErr(cmds []Cmder, e error) {
- for _, cmd := range cmds {
- if cmd.Err() == nil {
- cmd.SetErr(e)
- }
- }
-}
-
-func cmdsFirstErr(cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmds(wr *proto.Writer, cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := writeCmd(wr, cmd); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmd(wr *proto.Writer, cmd Cmder) error {
- return wr.WriteArgs(cmd.Args())
-}
-
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
- if pos := cmd.firstKeyPos(); pos != 0 {
- return int(pos)
- }
-
- switch cmd.Name() {
- case "eval", "evalsha":
- if cmd.stringArg(2) != "0" {
- return 3
- }
-
- return 0
- case "publish":
- return 1
- case "memory":
- // https://github.com/redis/redis/issues/7493
- if cmd.stringArg(1) == "usage" {
- return 2
- }
- }
-
- if info != nil {
- return int(info.FirstKeyPos)
- }
- return 0
-}
-
-func cmdString(cmd Cmder, val interface{}) string {
- b := make([]byte, 0, 64)
-
- for i, arg := range cmd.Args() {
- if i > 0 {
- b = append(b, ' ')
- }
- b = internal.AppendArg(b, arg)
- }
-
- if err := cmd.Err(); err != nil {
- b = append(b, ": "...)
- b = append(b, err.Error()...)
- } else if val != nil {
- b = append(b, ": "...)
- b = internal.AppendArg(b, val)
- }
-
- return internal.String(b)
-}
-
-//------------------------------------------------------------------------------
-
-type baseCmd struct {
- ctx context.Context
- args []interface{}
- err error
- keyPos int8
-
- _readTimeout *time.Duration
-}
-
-var _ Cmder = (*Cmd)(nil)
-
-func (cmd *baseCmd) Name() string {
- if len(cmd.args) == 0 {
- return ""
- }
- // Cmd name must be lower cased.
- return internal.ToLower(cmd.stringArg(0))
-}
-
-func (cmd *baseCmd) FullName() string {
- switch name := cmd.Name(); name {
- case "cluster", "command":
- if len(cmd.args) == 1 {
- return name
- }
- if s2, ok := cmd.args[1].(string); ok {
- return name + " " + s2
- }
- return name
- default:
- return name
- }
-}
-
-func (cmd *baseCmd) Args() []interface{} {
- return cmd.args
-}
-
-func (cmd *baseCmd) stringArg(pos int) string {
- if pos < 0 || pos >= len(cmd.args) {
- return ""
- }
- arg := cmd.args[pos]
- switch v := arg.(type) {
- case string:
- return v
- default:
- // TODO: consider using appendArg
- return fmt.Sprint(v)
- }
-}
-
-func (cmd *baseCmd) firstKeyPos() int8 {
- return cmd.keyPos
-}
-
-func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
- cmd.keyPos = keyPos
-}
-
-func (cmd *baseCmd) SetErr(e error) {
- cmd.err = e
-}
-
-func (cmd *baseCmd) Err() error {
- return cmd.err
-}
-
-func (cmd *baseCmd) readTimeout() *time.Duration {
- return cmd._readTimeout
-}
-
-func (cmd *baseCmd) setReadTimeout(d time.Duration) {
- cmd._readTimeout = &d
-}
-
-//------------------------------------------------------------------------------
-
-type Cmd struct {
- baseCmd
-
- val interface{}
-}
-
-func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
- return &Cmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *Cmd) SetVal(val interface{}) {
- cmd.val = val
-}
-
-func (cmd *Cmd) Val() interface{} {
- return cmd.val
-}
-
-func (cmd *Cmd) Result() (interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *Cmd) Text() (string, error) {
- if cmd.err != nil {
- return "", cmd.err
- }
- return toString(cmd.val)
-}
-
-func toString(val interface{}) (string, error) {
- switch val := val.(type) {
- case string:
- return val, nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for String", val)
- return "", err
- }
-}
-
-func (cmd *Cmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- switch val := cmd.val.(type) {
- case int64:
- return int(val), nil
- case string:
- return strconv.Atoi(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toInt64(cmd.val)
-}
-
-func toInt64(val interface{}) (int64, error) {
- switch val := val.(type) {
- case int64:
- return val, nil
- case string:
- return strconv.ParseInt(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toUint64(cmd.val)
-}
-
-func toUint64(val interface{}) (uint64, error) {
- switch val := val.(type) {
- case int64:
- return uint64(val), nil
- case string:
- return strconv.ParseUint(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat32(cmd.val)
-}
-
-func toFloat32(val interface{}) (float32, error) {
- switch val := val.(type) {
- case int64:
- return float32(val), nil
- case string:
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat64(cmd.val)
-}
-
-func toFloat64(val interface{}) (float64, error) {
- switch val := val.(type) {
- case int64:
- return float64(val), nil
- case string:
- return strconv.ParseFloat(val, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return toBool(cmd.val)
-}
-
-func toBool(val interface{}) (bool, error) {
- switch val := val.(type) {
- case int64:
- return val != 0, nil
- case string:
- return strconv.ParseBool(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
- return false, err
- }
-}
-
-func (cmd *Cmd) Slice() ([]interface{}, error) {
- if cmd.err != nil {
- return nil, cmd.err
- }
- switch val := cmd.val.(type) {
- case []interface{}:
- return val, nil
- default:
- return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
- }
-}
-
-func (cmd *Cmd) StringSlice() ([]string, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- ss := make([]string, len(slice))
- for i, iface := range slice {
- val, err := toString(iface)
- if err != nil {
- return nil, err
- }
- ss[i] = val
- }
- return ss, nil
-}
-
-func (cmd *Cmd) Int64Slice() ([]int64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]int64, len(slice))
- for i, iface := range slice {
- val, err := toInt64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]uint64, len(slice))
- for i, iface := range slice {
- val, err := toUint64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Float32Slice() ([]float32, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float32, len(slice))
- for i, iface := range slice {
- val, err := toFloat32(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) Float64Slice() ([]float64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float64, len(slice))
- for i, iface := range slice {
- val, err := toFloat64(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) BoolSlice() ([]bool, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- bools := make([]bool, len(slice))
- for i, iface := range slice {
- val, err := toBool(iface)
- if err != nil {
- return nil, err
- }
- bools[i] = val
- }
- return bools, nil
-}
-
-func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadReply(sliceParser)
- return err
-}
-
-// sliceParser implements proto.MultiBulkParse.
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- vals := make([]interface{}, n)
- for i := 0; i < len(vals); i++ {
- v, err := rd.ReadReply(sliceParser)
- if err != nil {
- if err == Nil {
- vals[i] = nil
- continue
- }
- if err, ok := err.(proto.RedisError); ok {
- vals[i] = err
- continue
- }
- return nil, err
- }
- vals[i] = v
- }
- return vals, nil
-}
-
-//------------------------------------------------------------------------------
-
-type SliceCmd struct {
- baseCmd
-
- val []interface{}
-}
-
-var _ Cmder = (*SliceCmd)(nil)
-
-func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
- return &SliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SliceCmd) SetVal(val []interface{}) {
- cmd.val = val
-}
-
-func (cmd *SliceCmd) Val() []interface{} {
- return cmd.val
-}
-
-func (cmd *SliceCmd) Result() ([]interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *SliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *SliceCmd) Scan(dst interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- // Pass the list of keys and values.
- // Skip the first two args for: HMGET key
- var args []interface{}
- if cmd.args[0] == "hmget" {
- args = cmd.args[2:]
- } else {
- // Otherwise, it's: MGET field field ...
- args = cmd.args[1:]
- }
-
- return hscan.Scan(dst, args, cmd.val)
-}
-
-func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(sliceParser)
- if err != nil {
- return err
- }
- cmd.val = v.([]interface{})
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StatusCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StatusCmd)(nil)
-
-func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
- return &StatusCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StatusCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StatusCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StatusCmd) Result() (string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StatusCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntCmd struct {
- baseCmd
-
- val int64
-}
-
-var _ Cmder = (*IntCmd)(nil)
-
-func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
- return &IntCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntCmd) SetVal(val int64) {
- cmd.val = val
-}
-
-func (cmd *IntCmd) Val() int64 {
- return cmd.val
-}
-
-func (cmd *IntCmd) Result() (int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntCmd) Uint64() (uint64, error) {
- return uint64(cmd.val), cmd.err
-}
-
-func (cmd *IntCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadIntReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntSliceCmd struct {
- baseCmd
-
- val []int64
-}
-
-var _ Cmder = (*IntSliceCmd)(nil)
-
-func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
- return &IntSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntSliceCmd) SetVal(val []int64) {
- cmd.val = val
-}
-
-func (cmd *IntSliceCmd) Val() []int64 {
- return cmd.val
-}
-
-func (cmd *IntSliceCmd) Result() ([]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]int64, n)
- for i := 0; i < len(cmd.val); i++ {
- num, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = num
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type DurationCmd struct {
- baseCmd
-
- val time.Duration
- precision time.Duration
-}
-
-var _ Cmder = (*DurationCmd)(nil)
-
-func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
- return &DurationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- precision: precision,
- }
-}
-
-func (cmd *DurationCmd) SetVal(val time.Duration) {
- cmd.val = val
-}
-
-func (cmd *DurationCmd) Val() time.Duration {
- return cmd.val
-}
-
-func (cmd *DurationCmd) Result() (time.Duration, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *DurationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadIntReply()
- if err != nil {
- return err
- }
- switch n {
- // -2 if the key does not exist
- // -1 if the key exists but has no associated expire
- case -2, -1:
- cmd.val = time.Duration(n)
- default:
- cmd.val = time.Duration(n) * cmd.precision
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type TimeCmd struct {
- baseCmd
-
- val time.Time
-}
-
-var _ Cmder = (*TimeCmd)(nil)
-
-func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
- return &TimeCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *TimeCmd) SetVal(val time.Time) {
- cmd.val = val
-}
-
-func (cmd *TimeCmd) Val() time.Time {
- return cmd.val
-}
-
-func (cmd *TimeCmd) Result() (time.Time, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *TimeCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d elements, expected 2", n)
- }
-
- sec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- microsec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- cmd.val = time.Unix(sec, microsec*1000)
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolCmd struct {
- baseCmd
-
- val bool
-}
-
-var _ Cmder = (*BoolCmd)(nil)
-
-func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
- return &BoolCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolCmd) SetVal(val bool) {
- cmd.val = val
-}
-
-func (cmd *BoolCmd) Val() bool {
- return cmd.val
-}
-
-func (cmd *BoolCmd) Result() (bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(nil)
- // `SET key value NX` returns nil when key already exists. But
- // `SETNX key value` returns bool (0/1). So convert nil to bool.
- if err == Nil {
- cmd.val = false
- return nil
- }
- if err != nil {
- return err
- }
- switch v := v.(type) {
- case int64:
- cmd.val = v == 1
- return nil
- case string:
- cmd.val = v == "OK"
- return nil
- default:
- return fmt.Errorf("got %T, wanted int64 or string", v)
- }
-}
-
-//------------------------------------------------------------------------------
-
-type StringCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StringCmd)(nil)
-
-func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
- return &StringCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StringCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StringCmd) Result() (string, error) {
- return cmd.Val(), cmd.err
-}
-
-func (cmd *StringCmd) Bytes() ([]byte, error) {
- return util.StringToBytes(cmd.val), cmd.err
-}
-
-func (cmd *StringCmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return strconv.ParseBool(cmd.val)
-}
-
-func (cmd *StringCmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.Atoi(cmd.Val())
-}
-
-func (cmd *StringCmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseInt(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseUint(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- f, err := strconv.ParseFloat(cmd.Val(), 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-func (cmd *StringCmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseFloat(cmd.Val(), 64)
-}
-
-func (cmd *StringCmd) Time() (time.Time, error) {
- if cmd.err != nil {
- return time.Time{}, cmd.err
- }
- return time.Parse(time.RFC3339Nano, cmd.Val())
-}
-
-func (cmd *StringCmd) Scan(val interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
- return proto.Scan([]byte(cmd.val), val)
-}
-
-func (cmd *StringCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatCmd struct {
- baseCmd
-
- val float64
-}
-
-var _ Cmder = (*FloatCmd)(nil)
-
-func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
- return &FloatCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatCmd) SetVal(val float64) {
- cmd.val = val
-}
-
-func (cmd *FloatCmd) Val() float64 {
- return cmd.val
-}
-
-func (cmd *FloatCmd) Result() (float64, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *FloatCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadFloatReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatSliceCmd struct {
- baseCmd
-
- val []float64
-}
-
-var _ Cmder = (*FloatSliceCmd)(nil)
-
-func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
- return &FloatSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatSliceCmd) SetVal(val []float64) {
- cmd.val = val
-}
-
-func (cmd *FloatSliceCmd) Val() []float64 {
- return cmd.val
-}
-
-func (cmd *FloatSliceCmd) Result() ([]float64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *FloatSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]float64, n)
- for i := 0; i < len(cmd.val); i++ {
- switch num, err := rd.ReadFloatReply(); {
- case err == Nil:
- cmd.val[i] = 0
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = num
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringSliceCmd struct {
- baseCmd
-
- val []string
-}
-
-var _ Cmder = (*StringSliceCmd)(nil)
-
-func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
- return &StringSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringSliceCmd) SetVal(val []string) {
- cmd.val = val
-}
-
-func (cmd *StringSliceCmd) Val() []string {
- return cmd.val
-}
-
-func (cmd *StringSliceCmd) Result() ([]string, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *StringSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
- return proto.ScanSlice(cmd.Val(), container)
-}
-
-func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]string, n)
- for i := 0; i < len(cmd.val); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.val[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = s
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolSliceCmd struct {
- baseCmd
-
- val []bool
-}
-
-var _ Cmder = (*BoolSliceCmd)(nil)
-
-func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
- return &BoolSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolSliceCmd) SetVal(val []bool) {
- cmd.val = val
-}
-
-func (cmd *BoolSliceCmd) Val() []bool {
- return cmd.val
-}
-
-func (cmd *BoolSliceCmd) Result() ([]bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]bool, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = n == 1
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStringMapCmd struct {
- baseCmd
-
- val map[string]string
-}
-
-var _ Cmder = (*StringStringMapCmd)(nil)
-
-func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
- return &StringStringMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
- cmd.val = val
-}
-
-func (cmd *StringStringMapCmd) Val() map[string]string {
- return cmd.val
-}
-
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStringMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- strct, err := hscan.Struct(dest)
- if err != nil {
- return err
- }
-
- for k, v := range cmd.val {
- if err := strct.Scan(k, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]string, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = value
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringIntMapCmd struct {
- baseCmd
-
- val map[string]int64
-}
-
-var _ Cmder = (*StringIntMapCmd)(nil)
-
-func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
- return &StringIntMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
- cmd.val = val
-}
-
-func (cmd *StringIntMapCmd) Val() map[string]int64 {
- return cmd.val
-}
-
-func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringIntMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]int64, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = n
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStructMapCmd struct {
- baseCmd
-
- val map[string]struct{}
-}
-
-var _ Cmder = (*StringStructMapCmd)(nil)
-
-func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
- return &StringStructMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
- cmd.val = val
-}
-
-func (cmd *StringStructMapCmd) Val() map[string]struct{} {
- return cmd.val
-}
-
-func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStructMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]struct{}, n)
- for i := int64(0); i < n; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- cmd.val[key] = struct{}{}
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XMessage struct {
- ID string
- Values map[string]interface{}
-}
-
-type XMessageSliceCmd struct {
- baseCmd
-
- val []XMessage
-}
-
-var _ Cmder = (*XMessageSliceCmd)(nil)
-
-func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
- return &XMessageSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
- cmd.val = val
-}
-
-func (cmd *XMessageSliceCmd) Val() []XMessage {
- return cmd.val
-}
-
-func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XMessageSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
- var err error
- cmd.val, err = readXMessageSlice(rd)
- return err
-}
-
-func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- msgs := make([]XMessage, n)
- for i := 0; i < n; i++ {
- var err error
- msgs[i], err = readXMessage(rd)
- if err != nil {
- return nil, err
- }
- }
- return msgs, nil
-}
-
-func readXMessage(rd *proto.Reader) (XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return XMessage{}, err
- }
- if n != 2 {
- return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return XMessage{}, err
- }
-
- var values map[string]interface{}
-
- v, err := rd.ReadArrayReply(stringInterfaceMapParser)
- if err != nil {
- if err != proto.Nil {
- return XMessage{}, err
- }
- } else {
- values = v.(map[string]interface{})
- }
-
- return XMessage{
- ID: id,
- Values: values,
- }, nil
-}
-
-// stringInterfaceMapParser implements proto.MultiBulkParse.
-func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]interface{}, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- m[key] = value
- }
- return m, nil
-}
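A quick sketch of the value shape the two parsers above produce (the entry ID, field name and value here are invented for illustration): a single stream entry is an ID plus a flat field/value array, which readXMessage decodes into:

// Sketch only: field values are read with ReadString, so they arrive as strings.
msg := XMessage{
	ID:     "1609459200000-0",
	Values: map[string]interface{}{"temperature": "21.5"},
}
_ = msg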
-
-//------------------------------------------------------------------------------
-
-type XStream struct {
- Stream string
- Messages []XMessage
-}
-
-type XStreamSliceCmd struct {
- baseCmd
-
- val []XStream
-}
-
-var _ Cmder = (*XStreamSliceCmd)(nil)
-
-func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
- return &XStreamSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
- cmd.val = val
-}
-
-func (cmd *XStreamSliceCmd) Val() []XStream {
- return cmd.val
-}
-
-func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XStreamSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XStream, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- stream, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- msgs, err := readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = XStream{
- Stream: stream,
- Messages: msgs,
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPending struct {
- Count int64
- Lower string
- Higher string
- Consumers map[string]int64
-}
-
-type XPendingCmd struct {
- baseCmd
- val *XPending
-}
-
-var _ Cmder = (*XPendingCmd)(nil)
-
-func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
- return &XPendingCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingCmd) SetVal(val *XPending) {
- cmd.val = val
-}
-
-func (cmd *XPendingCmd) Val() *XPending {
- return cmd.val
-}
-
-func (cmd *XPendingCmd) Result() (*XPending, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- count, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- lower, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- higher, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = &XPending{
- Count: count,
- Lower: lower,
- Higher: higher,
- }
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- for i := int64(0); i < n; i++ {
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- consumerName, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumerPending, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- if cmd.val.Consumers == nil {
- cmd.val.Consumers = make(map[string]int64)
- }
- cmd.val.Consumers[consumerName] = consumerPending
-
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- if err != nil && err != Nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
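As a rough illustration of the mapping implemented above (group and consumer names are invented), an XPENDING summary reply of the form [4, "1-0", "9-0", [["alice", "3"], ["bob", "1"]]] decodes into:

// Sketch of the decoded summary; per-consumer counts arrive as strings and are
// parsed into int64 by ReadInt.
val := &XPending{
	Count:  4,
	Lower:  "1-0",
	Higher: "9-0",
	Consumers: map[string]int64{
		"alice": 3,
		"bob":   1,
	},
}
_ = val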
-
-//------------------------------------------------------------------------------
-
-type XPendingExt struct {
- ID string
- Consumer string
- Idle time.Duration
- RetryCount int64
-}
-
-type XPendingExtCmd struct {
- baseCmd
- val []XPendingExt
-}
-
-var _ Cmder = (*XPendingExtCmd)(nil)
-
-func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
- return &XPendingExtCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
- cmd.val = val
-}
-
-func (cmd *XPendingExtCmd) Val() []XPendingExt {
- return cmd.val
-}
-
-func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingExtCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XPendingExt, 0, n)
- for i := int64(0); i < n; i++ {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumer, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- idle, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- retryCount, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = append(cmd.val, XPendingExt{
- ID: id,
- Consumer: consumer,
- Idle: time.Duration(idle) * time.Millisecond,
- RetryCount: retryCount,
- })
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimCmd struct {
- baseCmd
-
- start string
- val []XMessage
-}
-
-var _ Cmder = (*XAutoClaimCmd)(nil)
-
-func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
- return &XAutoClaimCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val, err = readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimJustIDCmd struct {
- baseCmd
-
- start string
- val []string
-}
-
-var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
-
-func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
- return &XAutoClaimJustIDCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimJustIDCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- cmd.val = make([]string, nn)
- for i := 0; i < nn; i++ {
- cmd.val[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoConsumersCmd struct {
- baseCmd
- val []XInfoConsumer
-}
-
-type XInfoConsumer struct {
- Name string
- Pending int64
- Idle int64
-}
-
-var _ Cmder = (*XInfoConsumersCmd)(nil)
-
-func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
- return &XInfoConsumersCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "consumers", stream, group},
- },
- }
-}
-
-func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
- cmd.val = val
-}
-
-func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
- return cmd.val
-}
-
-func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoConsumersCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoConsumer, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXConsumerInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
- var consumer XInfoConsumer
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return consumer, err
- }
- if n != 6 {
- return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
- }
-
- for i := 0; i < 3; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- switch key {
- case "name":
- consumer.Name = val
- case "pending":
- consumer.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- case "idle":
- consumer.Idle, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- default:
- return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
- }
- }
-
- return consumer, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoGroupsCmd struct {
- baseCmd
- val []XInfoGroup
-}
-
-type XInfoGroup struct {
- Name string
- Consumers int64
- Pending int64
- LastDeliveredID string
-}
-
-var _ Cmder = (*XInfoGroupsCmd)(nil)
-
-func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
- return &XInfoGroupsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "groups", stream},
- },
- }
-}
-
-func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
- cmd.val = val
-}
-
-func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
- return cmd.val
-}
-
-func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoGroupsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoGroup, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXGroupInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
- var group XInfoGroup
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return group, err
- }
- if n != 8 {
- return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
- }
-
- for i := 0; i < 4; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- switch key {
- case "name":
- group.Name = val
- case "consumers":
- group.Consumers, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "pending":
- group.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "last-delivered-id":
- group.LastDeliveredID = val
- default:
- return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
- }
- }
-
- return group, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamCmd struct {
- baseCmd
- val *XInfoStream
-}
-
-type XInfoStream struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- Groups int64
- LastGeneratedID string
- FirstEntry XMessage
- LastEntry XMessage
-}
-
-var _ Cmder = (*XInfoStreamCmd)(nil)
-
-func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
- return &XInfoStreamCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "stream", stream},
- },
- }
-}
-
-func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamCmd) Val() *XInfoStream {
- return cmd.val
-}
-
-func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(xStreamInfoParser)
- if err != nil {
- return err
- }
- cmd.val = v.(*XInfoStream)
- return nil
-}
-
-func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 14 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 14", n)
- }
- var info XInfoStream
- for i := 0; i < 7; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- switch key {
- case "length":
- info.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- info.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- info.RadixTreeNodes, err = rd.ReadIntReply()
- case "groups":
- info.Groups, err = rd.ReadIntReply()
- case "last-generated-id":
- info.LastGeneratedID, err = rd.ReadString()
- case "first-entry":
- info.FirstEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- case "last-entry":
- info.LastEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return nil, err
- }
- }
- return &info, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamFullCmd struct {
- baseCmd
- val *XInfoStreamFull
-}
-
-type XInfoStreamFull struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- LastGeneratedID string
- Entries []XMessage
- Groups []XInfoStreamGroup
-}
-
-type XInfoStreamGroup struct {
- Name string
- LastDeliveredID string
- PelCount int64
- Pending []XInfoStreamGroupPending
- Consumers []XInfoStreamConsumer
-}
-
-type XInfoStreamGroupPending struct {
- ID string
- Consumer string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-type XInfoStreamConsumer struct {
- Name string
- SeenTime time.Time
- PelCount int64
- Pending []XInfoStreamConsumerPending
-}
-
-type XInfoStreamConsumerPending struct {
- ID string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-var _ Cmder = (*XInfoStreamFullCmd)(nil)
-
-func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
- return &XInfoStreamFullCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
- return cmd.val
-}
-
-func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamFullCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if n != 12 {
- return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 12", n)
- }
-
- cmd.val = &XInfoStreamFull{}
-
- for i := 0; i < 6; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return err
- }
-
- switch key {
- case "length":
- cmd.val.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
- case "last-generated-id":
- cmd.val.LastGeneratedID, err = rd.ReadString()
- case "entries":
- cmd.val.Entries, err = readXMessageSlice(rd)
- case "groups":
- cmd.val.Groups, err = readStreamGroups(rd)
- default:
- return fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- groups := make([]XInfoStreamGroup, 0, n)
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 10 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 10", nn)
- }
-
- group := XInfoStreamGroup{}
-
- for f := 0; f < 5; f++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch key {
- case "name":
- group.Name, err = rd.ReadString()
- case "last-delivered-id":
- group.LastDeliveredID, err = rd.ReadString()
- case "pel-count":
- group.PelCount, err = rd.ReadIntReply()
- case "pending":
- group.Pending, err = readXInfoStreamGroupPending(rd)
- case "consumers":
- group.Consumers, err = readXInfoStreamConsumers(rd)
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- groups = append(groups, group)
- }
-
- return groups, nil
-}
-
-func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- pending := make([]XInfoStreamGroupPending, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 4 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 4", nn)
- }
-
- p := XInfoStreamGroupPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- p.Consumer, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- pending = append(pending, p)
- }
-
- return pending, nil
-}
-
-func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- consumers := make([]XInfoStreamConsumer, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 8 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 8", nn)
- }
-
- c := XInfoStreamConsumer{}
-
- for f := 0; f < 4; f++ {
- cKey, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch cKey {
- case "name":
- c.Name, err = rd.ReadString()
- case "seen-time":
- seen, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
- case "pel-count":
- c.PelCount, err = rd.ReadIntReply()
- case "pending":
- pendingNumber, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
-
- for pn := 0; pn < pendingNumber; pn++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 3 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 3", nn)
- }
-
- p := XInfoStreamConsumerPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- c.Pending = append(c.Pending, p)
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", cKey)
- }
- if err != nil {
- return nil, err
- }
- }
- consumers = append(consumers, c)
- }
-
- return consumers, nil
-}
-
-//------------------------------------------------------------------------------
-
-type ZSliceCmd struct {
- baseCmd
-
- val []Z
-}
-
-var _ Cmder = (*ZSliceCmd)(nil)
-
-func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
- return &ZSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZSliceCmd) SetVal(val []Z) {
- cmd.val = val
-}
-
-func (cmd *ZSliceCmd) Val() []Z {
- return cmd.val
-}
-
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *ZSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]Z, n/2)
- for i := 0; i < len(cmd.val); i++ {
- member, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- score, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = Z{
- Member: member,
- Score: score,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ZWithKeyCmd struct {
- baseCmd
-
- val *ZWithKey
-}
-
-var _ Cmder = (*ZWithKeyCmd)(nil)
-
-func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
- return &ZWithKeyCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
- cmd.val = val
-}
-
-func (cmd *ZWithKeyCmd) Val() *ZWithKey {
- return cmd.val
-}
-
-func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ZWithKeyCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 3 {
- return nil, fmt.Errorf("got %d elements, expected 3", n)
- }
-
- cmd.val = &ZWithKey{}
- var err error
-
- cmd.val.Key, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Member, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Score, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
- baseCmd
-
- page []string
- cursor uint64
-
- process cmdable
-}
-
-var _ Cmder = (*ScanCmd)(nil)
-
-func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
- return &ScanCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- process: process,
- }
-}
-
-func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
- cmd.page = page
- cmd.cursor = cursor
-}
-
-func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
- return cmd.page, cmd.cursor
-}
-
-func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
- return cmd.page, cmd.cursor, cmd.err
-}
-
-func (cmd *ScanCmd) String() string {
- return cmdString(cmd, cmd.page)
-}
-
-func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
- cmd.page, cmd.cursor, err = rd.ReadScanReply()
- return err
-}
-
-// Iterator creates a new ScanIterator.
-func (cmd *ScanCmd) Iterator() *ScanIterator {
- return &ScanIterator{
- cmd: cmd,
- }
-}
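A minimal usage sketch for the iterator above, assuming an initialized *Client named rdb and the ScanIterator methods (Next, Val, Err), which are defined elsewhere in the package rather than in this hunk:

// Hypothetical caller: pattern, count and variable names are illustrative only.
iter := rdb.Scan(ctx, 0, "prefix:*", 100).Iterator()
for iter.Next(ctx) {
	fmt.Println("key:", iter.Val())
}
if err := iter.Err(); err != nil {
	// handle the scan error
}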
-
-//------------------------------------------------------------------------------
-
-type ClusterNode struct {
- ID string
- Addr string
-}
-
-type ClusterSlot struct {
- Start int
- End int
- Nodes []ClusterNode
-}
-
-type ClusterSlotsCmd struct {
- baseCmd
-
- val []ClusterSlot
-}
-
-var _ Cmder = (*ClusterSlotsCmd)(nil)
-
-func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
- return &ClusterSlotsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
- cmd.val = val
-}
-
-func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
- return cmd.val
-}
-
-func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ClusterSlotsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]ClusterSlot, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 2 {
- err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
- return nil, err
- }
-
- start, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- end, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ClusterNode, n-2)
- for j := 0; j < len(nodes); j++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 && n != 3 {
- err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
- return nil, err
- }
-
- ip, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- port, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nodes[j].Addr = net.JoinHostPort(ip, port)
-
- if n == 3 {
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- nodes[j].ID = id
- }
- }
-
- cmd.val[i] = ClusterSlot{
- Start: int(start),
- End: int(end),
- Nodes: nodes,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-// GeoLocation is used with GeoAdd to add geospatial location.
-type GeoLocation struct {
- Name string
- Longitude, Latitude, Dist float64
- GeoHash int64
-}
-
-// GeoRadiusQuery is used with GeoRadius to query geospatial index.
-type GeoRadiusQuery struct {
- Radius float64
- // Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
- WithGeoHash bool
- Count int
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
- StoreDist string
-}
-
-type GeoLocationCmd struct {
- baseCmd
-
- q *GeoRadiusQuery
- locations []GeoLocation
-}
-
-var _ Cmder = (*GeoLocationCmd)(nil)
-
-func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
- return &GeoLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: geoLocationArgs(q, args...),
- },
- q: q,
- }
-}
-
-func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
- args = append(args, q.Radius)
- if q.Unit != "" {
- args = append(args, q.Unit)
- } else {
- args = append(args, "km")
- }
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithGeoHash {
- args = append(args, "withhash")
- }
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- }
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
- if q.Store != "" {
- args = append(args, "store")
- args = append(args, q.Store)
- }
- if q.StoreDist != "" {
- args = append(args, "storedist")
- args = append(args, q.StoreDist)
- }
- return args
-}
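To make the flag handling above concrete, a hypothetical query (key and values invented) expands as follows:

// Sketch: geoLocationArgs appends the radius, unit and enabled flags in order,
// so the call below returns
// []interface{}{"georadius", "mykey", float64(200), "km", "withcoord", "count", 10}.
q := GeoRadiusQuery{Radius: 200, Unit: "km", WithCoord: true, Count: 10}
args := geoLocationArgs(&q, "georadius", "mykey")
_ = args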
-
-func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
- cmd.locations = locations
-}
-
-func (cmd *GeoLocationCmd) Val() []GeoLocation {
- return cmd.locations
-}
-
-func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.locations, cmd.err
-}
-
-func (cmd *GeoLocationCmd) String() string {
- return cmdString(cmd, cmd.locations)
-}
-
-func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if err != nil {
- return err
- }
- cmd.locations = v.([]GeoLocation)
- return nil
-}
-
-func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- locs := make([]GeoLocation, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(newGeoLocationParser(q))
- if err != nil {
- return nil, err
- }
- switch vv := v.(type) {
- case string:
- locs = append(locs, GeoLocation{
- Name: vv,
- })
- case *GeoLocation:
- // TODO: avoid copying
- locs = append(locs, *vv)
- default:
- return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
- }
- }
- return locs, nil
- }
-}
-
-func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- var loc GeoLocation
- var err error
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- if q.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithGeoHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithCoord {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 {
- return nil, fmt.Errorf("got %d coordinates, expected 2", n)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
-
- return &loc, nil
- }
-}
-
-//------------------------------------------------------------------------------
-
-// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
-type GeoSearchQuery struct {
- Member string
-
- // Latitude and Longitude when using FromLonLat option.
- Longitude float64
- Latitude float64
-
- // Distance and unit when using ByRadius option.
- // Can use m, km, ft, or mi. Default is km.
- Radius float64
- RadiusUnit string
-
- // Height, width and unit when using ByBox option.
- // Can be m, km, ft, or mi. Default is km.
- BoxWidth float64
- BoxHeight float64
- BoxUnit string
-
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Count int
- CountAny bool
-}
-
-type GeoSearchLocationQuery struct {
- GeoSearchQuery
-
- WithCoord bool
- WithDist bool
- WithHash bool
-}
-
-type GeoSearchStoreQuery struct {
- GeoSearchQuery
-
- // When using the StoreDist option, the command stores the items in a
- // sorted set populated with their distance from the center of the circle or box,
- // as a floating-point number, in the same unit specified for that shape.
- StoreDist bool
-}
-
-func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
- args = geoSearchArgs(&q.GeoSearchQuery, args)
-
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithHash {
- args = append(args, "withhash")
- }
-
- return args
-}
-
-func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
- if q.Member != "" {
- args = append(args, "frommember", q.Member)
- } else {
- args = append(args, "fromlonlat", q.Longitude, q.Latitude)
- }
-
- if q.Radius > 0 {
- if q.RadiusUnit == "" {
- q.RadiusUnit = "km"
- }
- args = append(args, "byradius", q.Radius, q.RadiusUnit)
- } else {
- if q.BoxUnit == "" {
- q.BoxUnit = "km"
- }
- args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
- }
-
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
-
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- if q.CountAny {
- args = append(args, "any")
- }
- }
-
- return args
-}
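Similarly, a sketch of how the two search shapes translate (member, coordinates and sizes are invented):

// FROMMEMBER + BYRADIUS: Member is set and Radius > 0.
byRadius := GeoSearchQuery{Member: "store-1", Radius: 5, RadiusUnit: "mi", Sort: "asc"}
// geoSearchArgs(&byRadius, nil) -> ["frommember", "store-1", "byradius", 5, "mi", "asc"]

// FROMLONLAT + BYBOX: Member is empty and Radius is zero; BoxUnit defaults to "km".
byBox := GeoSearchQuery{Longitude: 13.4, Latitude: 52.5, BoxWidth: 10, BoxHeight: 20}
// geoSearchArgs(&byBox, nil) -> ["fromlonlat", 13.4, 52.5, "bybox", 10, 20, "km"]
_, _ = byRadius, byBox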
-
-type GeoSearchLocationCmd struct {
- baseCmd
-
- opt *GeoSearchLocationQuery
- val []GeoLocation
-}
-
-var _ Cmder = (*GeoSearchLocationCmd)(nil)
-
-func NewGeoSearchLocationCmd(
- ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
-) *GeoSearchLocationCmd {
- return &GeoSearchLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- opt: opt,
- }
-}
-
-func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
- cmd.val = val
-}
-
-func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
- return cmd.val
-}
-
-func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *GeoSearchLocationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]GeoLocation, n)
- for i := 0; i < n; i++ {
- _, err = rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- var loc GeoLocation
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return err
- }
- if cmd.opt.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithCoord {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if nn != 2 {
- return fmt.Errorf("got %d coordinates, expected 2", nn)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
-
- cmd.val[i] = loc
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type GeoPos struct {
- Longitude, Latitude float64
-}
-
-type GeoPosCmd struct {
- baseCmd
-
- val []*GeoPos
-}
-
-var _ Cmder = (*GeoPosCmd)(nil)
-
-func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
- return &GeoPosCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
- cmd.val = val
-}
-
-func (cmd *GeoPosCmd) Val() []*GeoPos {
- return cmd.val
-}
-
-func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *GeoPosCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]*GeoPos, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- longitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- latitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = &GeoPos{
- Longitude: longitude,
- Latitude: latitude,
- }
- return nil, nil
- })
- if err != nil {
- if err == Nil {
- cmd.val[i] = nil
- continue
- }
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- ACLFlags []string
- FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
-}
-
-type CommandsInfoCmd struct {
- baseCmd
-
- val map[string]*CommandInfo
-}
-
-var _ Cmder = (*CommandsInfoCmd)(nil)
-
-func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
- return &CommandsInfoCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
- cmd.val = val
-}
-
-func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
- return cmd.val
-}
-
-func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *CommandsInfoCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]*CommandInfo, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(commandInfoParser)
- if err != nil {
- return nil, err
- }
- vv := v.(*CommandInfo)
- cmd.val[vv.Name] = vv
- }
- return nil, nil
- })
- return err
-}
-
-func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- const numArgRedis5 = 6
- const numArgRedis6 = 7
-
- switch n {
- case numArgRedis5, numArgRedis6:
- // continue
- default:
- return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
- }
-
- var cmd CommandInfo
- var err error
-
- cmd.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- arity, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.Arity = int8(arity)
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.Flags = make([]string, n)
- for i := 0; i < len(cmd.Flags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.Flags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.Flags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- firstKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.FirstKeyPos = int8(firstKeyPos)
-
- lastKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.LastKeyPos = int8(lastKeyPos)
-
- stepCount, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.StepCount = int8(stepCount)
-
- for _, flag := range cmd.Flags {
- if flag == "readonly" {
- cmd.ReadOnly = true
- break
- }
- }
-
- if n == numArgRedis5 {
- return &cmd, nil
- }
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.ACLFlags = make([]string, n)
- for i := 0; i < len(cmd.ACLFlags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.ACLFlags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.ACLFlags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- return &cmd, nil
-}
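For reference, a sketch of the parsed result for a simple read-only command; treat the exact values as illustrative rather than authoritative:

// Typical result for GET on Redis 6, where the seventh reply element carries
// ACL categories (a Redis 5 reply stops after StepCount, so ACLFlags stays nil).
info := &CommandInfo{
	Name:        "get",
	Arity:       2,
	Flags:       []string{"readonly", "fast"},
	ACLFlags:    []string{"@read", "@string", "@fast"},
	FirstKeyPos: 1,
	LastKeyPos:  1,
	StepCount:   1,
	ReadOnly:    true,
}
_ = info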
-
-//------------------------------------------------------------------------------
-
-type cmdsInfoCache struct {
- fn func(ctx context.Context) (map[string]*CommandInfo, error)
-
- once internal.Once
- cmds map[string]*CommandInfo
-}
-
-func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
- return &cmdsInfoCache{
- fn: fn,
- }
-}
-
-func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
- err := c.once.Do(func() error {
- cmds, err := c.fn(ctx)
- if err != nil {
- return err
- }
-
- // Extensions have cmd names in upper case. Convert them to lower case.
- for k, v := range cmds {
- lower := internal.ToLower(k)
- if lower != k {
- cmds[lower] = v
- }
- }
-
- c.cmds = cmds
- return nil
- })
- return c.cmds, err
-}
-
-//------------------------------------------------------------------------------
-
-type SlowLog struct {
- ID int64
- Time time.Time
- Duration time.Duration
- Args []string
- // These are also optional fields emitted only by Redis 4.0 or greater:
- // https://redis.io/commands/slowlog#output-format
- ClientAddr string
- ClientName string
-}
-
-type SlowLogCmd struct {
- baseCmd
-
- val []SlowLog
-}
-
-var _ Cmder = (*SlowLogCmd)(nil)
-
-func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
- return &SlowLogCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
- cmd.val = val
-}
-
-func (cmd *SlowLogCmd) Val() []SlowLog {
- return cmd.val
-}
-
-func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *SlowLogCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]SlowLog, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 4 {
- err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
- return nil, err
- }
-
- id, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- createdAt, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- createdAtTime := time.Unix(createdAt, 0)
-
- costs, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- costsDuration := time.Duration(costs) * time.Microsecond
-
- cmdLen, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if cmdLen < 1 {
- err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
- return nil, err
- }
-
- cmdString := make([]string, cmdLen)
- for i := 0; i < cmdLen; i++ {
- cmdString[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- var address, name string
- for i := 4; i < n; i++ {
- str, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- if i == 4 {
- address = str
- } else if i == 5 {
- name = str
- }
- }
-
- cmd.val[i] = SlowLog{
- ID: id,
- Time: createdAtTime,
- Duration: costsDuration,
- Args: cmdString,
- ClientAddr: address,
- ClientName: name,
- }
- }
- return nil, nil
- })
- return err
-}
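A sketch of how one SLOWLOG GET entry maps onto the struct above (all values invented): the second element is a unix timestamp in seconds, the third a duration in microseconds, and elements five and six, when present, are the client address and name.

// Illustrative decoded entry: a 2.5ms GET recorded at 2021-01-01 00:00:00 UTC.
entry := SlowLog{
	ID:         42,
	Time:       time.Unix(1609459200, 0),
	Duration:   2500 * time.Microsecond,
	Args:       []string{"get", "mykey"},
	ClientAddr: "127.0.0.1:53424",
	ClientName: "worker-1",
}
_ = entry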
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
deleted file mode 100644
index 0e6ca779b..000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package proto
-
-import (
- "bufio"
- "fmt"
- "io"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// redis resp protocol data type.
-const (
- ErrorReply = '-'
- StatusReply = '+'
- IntReply = ':'
- StringReply = '$'
- ArrayReply = '*'
-)
-
-//------------------------------------------------------------------------------
-
-const Nil = RedisError("redis: nil") // nolint:errname
-
-type RedisError string
-
-func (e RedisError) Error() string { return string(e) }
-
-func (RedisError) RedisError() {}
-
-//------------------------------------------------------------------------------
-
-type MultiBulkParse func(*Reader, int64) (interface{}, error)
-
-type Reader struct {
- rd *bufio.Reader
- _buf []byte
-}
-
-func NewReader(rd io.Reader) *Reader {
- return &Reader{
- rd: bufio.NewReader(rd),
- _buf: make([]byte, 64),
- }
-}
-
-func (r *Reader) Buffered() int {
- return r.rd.Buffered()
-}
-
-func (r *Reader) Peek(n int) ([]byte, error) {
- return r.rd.Peek(n)
-}
-
-func (r *Reader) Reset(rd io.Reader) {
- r.rd.Reset(rd)
-}
-
-func (r *Reader) ReadLine() ([]byte, error) {
- line, err := r.readLine()
- if err != nil {
- return nil, err
- }
- if isNilReply(line) {
- return nil, Nil
- }
- return line, nil
-}
-
-// readLine that returns an error if:
-// - there is a pending read error;
-// - or line does not end with \r\n.
-func (r *Reader) readLine() ([]byte, error) {
- b, err := r.rd.ReadSlice('\n')
- if err != nil {
- if err != bufio.ErrBufferFull {
- return nil, err
- }
-
- full := make([]byte, len(b))
- copy(full, b)
-
- b, err = r.rd.ReadBytes('\n')
- if err != nil {
- return nil, err
- }
-
- full = append(full, b...) //nolint:makezero
- b = full
- }
- if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
- return nil, fmt.Errorf("redis: invalid reply: %q", b)
- }
- return b[:len(b)-2], nil
-}
-
-func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
-
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- case StringReply:
- return r.readStringReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- if m == nil {
- err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
- return nil, err
- }
- return m(r, n)
- }
- return nil, fmt.Errorf("redis: can't parse %.100q", line)
-}
-
-func (r *Reader) ReadIntReply() (int64, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- default:
- return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadString() (string, error) {
- line, err := r.ReadLine()
- if err != nil {
- return "", err
- }
- switch line[0] {
- case ErrorReply:
- return "", ParseErrorReply(line)
- case StringReply:
- return r.readStringReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return string(line[1:]), nil
- default:
- return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
- }
-}
-
-func (r *Reader) readStringReply(line []byte) (string, error) {
- if isNilReply(line) {
- return "", Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return "", err
- }
-
- b := make([]byte, replyLen+2)
- _, err = io.ReadFull(r.rd, b)
- if err != nil {
- return "", err
- }
-
- return util.BytesToString(b[:replyLen]), nil
-}
-
-func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- return m(r, n)
- default:
- return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadArrayLen() (int, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return 0, err
- }
- return int(n), nil
- default:
- return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadScanReply() ([]string, uint64, error) {
- n, err := r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
- if n != 2 {
- return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
- }
-
- cursor, err := r.ReadUint()
- if err != nil {
- return nil, 0, err
- }
-
- n, err = r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
-
- keys := make([]string, n)
-
- for i := 0; i < n; i++ {
- key, err := r.ReadString()
- if err != nil {
- return nil, 0, err
- }
- keys[i] = key
- }
-
- return keys, cursor, err
-}
-
-func (r *Reader) ReadInt() (int64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseInt(b, 10, 64)
-}
-
-func (r *Reader) ReadUint() (uint64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseUint(b, 10, 64)
-}
-
-func (r *Reader) ReadFloatReply() (float64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseFloat(b, 64)
-}
-
-func (r *Reader) readTmpBytesReply() ([]byte, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StringReply:
- return r._readTmpBytesReply(line)
- case StatusReply:
- return line[1:], nil
- default:
- return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
- }
-}
-
-func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
- if isNilReply(line) {
- return nil, Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return nil, err
- }
-
- buf := r.buf(replyLen + 2)
- _, err = io.ReadFull(r.rd, buf)
- if err != nil {
- return nil, err
- }
-
- return buf[:replyLen], nil
-}
-
-func (r *Reader) buf(n int) []byte {
- if n <= cap(r._buf) {
- return r._buf[:n]
- }
- d := n - cap(r._buf)
- r._buf = append(r._buf, make([]byte, d)...)
- return r._buf
-}
-
-func isNilReply(b []byte) bool {
- return len(b) == 3 &&
- (b[0] == StringReply || b[0] == ArrayReply) &&
- b[1] == '-' && b[2] == '1'
-}
-
-func ParseErrorReply(line []byte) error {
- return RedisError(string(line[1:]))
-}
-
-func parseArrayLen(line []byte) (int64, error) {
- if isNilReply(line) {
- return 0, Nil
- }
- return util.ParseInt(line[1:], 10, 64)
-}
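A minimal sketch of the wire format this reader consumes, driving it from an in-memory buffer (the payload is invented; in the client the reader is always fed from a pooled connection):

// Each RESP frame starts with a type byte and ends with \r\n.
buf := strings.NewReader("+OK\r\n:42\r\n$5\r\nhello\r\n*2\r\n$1\r\na\r\n$1\r\nb\r\n")
rd := NewReader(buf)
status, _ := rd.ReadString()  // "OK"    (StatusReply '+')
count, _ := rd.ReadIntReply() // 42      (IntReply ':')
word, _ := rd.ReadString()    // "hello" (StringReply '$', length-prefixed)
n, _ := rd.ReadArrayLen()     // 2       (ArrayReply '*', elements follow)
_, _, _, _ = status, count, word, n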
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
deleted file mode 100644
index fd2f43409..000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/safe.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package internal
-
-func String(b []byte) string {
- return string(b)
-}
-
-func Bytes(s string) []byte {
- return []byte(s)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
deleted file mode 100644
index 9f2e418f7..000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import "unsafe"
-
-// String converts byte slice to string.
-func String(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// Bytes converts string to byte slice.
-func Bytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
deleted file mode 100644
index bcf8a2a94..000000000
--- a/vendor/github.com/go-redis/redis/v8/redis.go
+++ /dev/null
@@ -1,773 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// Nil reply returned by Redis when key does not exist.
-const Nil = proto.Nil
-
-func SetLogger(logger internal.Logging) {
- internal.Logger = logger
-}
-
-//------------------------------------------------------------------------------
-
-type Hook interface {
- BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
- AfterProcess(ctx context.Context, cmd Cmder) error
-
- BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
- AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
-}
-
-type hooks struct {
- hooks []Hook
-}
-
-func (hs *hooks) lock() {
- hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
-}
-
-func (hs hooks) clone() hooks {
- clone := hs
- clone.lock()
- return clone
-}
-
-func (hs *hooks) AddHook(hook Hook) {
- hs.hooks = append(hs.hooks, hook)
-}
-
-func (hs hooks) process(
- ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmd)
- cmd.SetErr(err)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
- if retErr != nil {
- cmd.SetErr(retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmd)
- cmd.SetErr(retErr)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
- retErr = err
- cmd.SetErr(retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmds)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
- if retErr != nil {
- setCmdsErr(cmds, retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmds)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
- retErr = err
- setCmdsErr(cmds, retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processTxPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- cmds = wrapMultiExec(ctx, cmds)
- return hs.processPipeline(ctx, cmds, fn)
-}
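A minimal sketch of a Hook implementation against the interface above; the type name, context key and log message are invented, and registering it via AddHook is assumed to route every command through BeforeProcess/AfterProcess as shown in hooks.process:

// loggingHook is an illustrative Hook that records how long each command takes.
type loggingHook struct{}

type startKey struct{}

func (loggingHook) BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
	return context.WithValue(ctx, startKey{}, time.Now()), nil
}

func (loggingHook) AfterProcess(ctx context.Context, cmd Cmder) error {
	if start, ok := ctx.Value(startKey{}).(time.Time); ok {
		internal.Logger.Printf(ctx, "%s took %s", cmd.Name(), time.Since(start))
	}
	return nil
}

func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
	return ctx, nil
}

func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []Cmder) error {
	return nil
}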
-
-//------------------------------------------------------------------------------
-
-type baseClient struct {
- opt *Options
- connPool pool.Pooler
-
- onClose func() error // hook called when client is closed
-}
-
-func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
- return &baseClient{
- opt: opt,
- connPool: connPool,
- }
-}
-
-func (c *baseClient) clone() *baseClient {
- clone := *c
- return &clone
-}
-
-func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
- opt := c.opt.clone()
- opt.ReadTimeout = timeout
- opt.WriteTimeout = timeout
-
- clone := c.clone()
- clone.opt = opt
-
- return clone
-}
-
-func (c *baseClient) String() string {
- return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
-}
-
-func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.NewConn(ctx)
- if err != nil {
- return nil, err
- }
-
- err = c.initConn(ctx, cn)
- if err != nil {
- _ = c.connPool.CloseConn(cn)
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
- if c.opt.Limiter != nil {
- err := c.opt.Limiter.Allow()
- if err != nil {
- return nil, err
- }
- }
-
- cn, err := c._getConn(ctx)
- if err != nil {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if cn.Inited {
- return cn, nil
- }
-
- if err := c.initConn(ctx, cn); err != nil {
- c.connPool.Remove(ctx, cn, err)
- if err := errors.Unwrap(err); err != nil {
- return nil, err
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
- if cn.Inited {
- return nil
- }
- cn.Inited = true
-
- if c.opt.Password == "" &&
- c.opt.DB == 0 &&
- !c.opt.readOnly &&
- c.opt.OnConnect == nil {
- return nil
- }
-
- connPool := pool.NewSingleConnPool(c.connPool, cn)
- conn := newConn(ctx, c.opt, connPool)
-
- _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
- if c.opt.Password != "" {
- if c.opt.Username != "" {
- pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
- } else {
- pipe.Auth(ctx, c.opt.Password)
- }
- }
-
- if c.opt.DB > 0 {
- pipe.Select(ctx, c.opt.DB)
- }
-
- if c.opt.readOnly {
- pipe.ReadOnly(ctx)
- }
-
- return nil
- })
- if err != nil {
- return err
- }
-
- if c.opt.OnConnect != nil {
- return c.opt.OnConnect(ctx, conn)
- }
- return nil
-}
-
-func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
-
- if isBadConn(err, false, c.opt.Addr) {
- c.connPool.Remove(ctx, cn, err)
- } else {
- c.connPool.Put(ctx, cn)
- }
-}
-
-func (c *baseClient) withConn(
- ctx context.Context, fn func(context.Context, *pool.Conn) error,
-) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
-
- defer func() {
- c.releaseConn(ctx, cn, err)
- }()
-
- done := ctx.Done() //nolint:ifshort
-
- if done == nil {
- err = fn(ctx, cn)
- return err
- }
-
- errc := make(chan error, 1)
- go func() { errc <- fn(ctx, cn) }()
-
- select {
- case <-done:
- _ = cn.Close()
- // Wait for the goroutine to finish and send something.
- <-errc
-
- err = ctx.Err()
- return err
- case err = <-errc:
- return err
- }
-}
-
-func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- attempt := attempt
-
- retry, err := c._process(ctx, cmd, attempt)
- if err == nil || !retry {
- return err
- }
-
- lastErr = err
- }
- return lastErr
-}
-
-func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return false, err
- }
- }
-
- retryTimeout := uint32(1)
- err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
- if err != nil {
- return err
- }
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- if cmd.readTimeout() == nil {
- atomic.StoreUint32(&retryTimeout, 1)
- }
- return err
- }
-
- return nil
- })
- if err == nil {
- return false, nil
- }
-
- retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
- return retry, err
-}
-
-func (c *baseClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
- if timeout := cmd.readTimeout(); timeout != nil {
- t := *timeout
- if t == 0 {
- return 0
- }
- return t + 10*time.Second
- }
- return c.opt.ReadTimeout
-}
-
-// Close closes the client, releasing any open resources.
-//
-// It is rare to Close a Client, as the Client is meant to be
-// long-lived and shared between many goroutines.
-func (c *baseClient) Close() error {
- var firstErr error
- if c.onClose != nil {
- if err := c.onClose(); err != nil {
- firstErr = err
- }
- }
- if err := c.connPool.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- return firstErr
-}
-
-func (c *baseClient) getAddr() string {
- return c.opt.Addr
-}
-
-func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
-}
-
-func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
-}
-
-type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
-
-func (c *baseClient) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- err := c._generalProcessPipeline(ctx, cmds, p)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- return cmdsFirstErr(cmds)
-}
-
-func (c *baseClient) _generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- var canRetry bool
- lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- var err error
- canRetry, err = p(ctx, cn, cmds)
- return err
- })
- if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *baseClient) pipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return pipelineReadCmds(rd, cmds)
- })
- return true, err
-}
-
-func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
- if err != nil && !isRedisError(err) {
- return err
- }
- }
- return nil
-}
-
-func (c *baseClient) txPipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := txPipelineReadQueued(rd, statusCmd, cmds)
- if err != nil {
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- return false, err
-}
-
-func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
- if len(cmds) == 0 {
- panic("not reached")
- }
- cmdCopy := make([]Cmder, len(cmds)+2)
- cmdCopy[0] = NewStatusCmd(ctx, "multi")
- copy(cmdCopy[1:], cmds)
- cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
- return cmdCopy
-}
-
-func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for range cmds {
- if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
- return err
- }
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- err := fmt.Errorf("redis: expected '*', but got line %q", line)
- return err
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-// Client is a Redis client representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-type Client struct {
- *baseClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClient returns a client to the Redis Server specified by Options.
-func NewClient(opt *Options) *Client {
- opt.init()
-
- c := Client{
- baseClient: newBaseClient(opt, newConnPool(opt)),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
-
- return &c
-}
-
-func (c *Client) clone() *Client {
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- return &clone
-}
-
-func (c *Client) WithTimeout(timeout time.Duration) *Client {
- clone := c.clone()
- clone.baseClient = c.baseClient.withTimeout(timeout)
- return clone
-}
-
-func (c *Client) Context() context.Context {
- return c.ctx
-}
-
-func (c *Client) WithContext(ctx context.Context) *Client {
- if ctx == nil {
- panic("nil context")
- }
- clone := c.clone()
- clone.ctx = ctx
- return clone
-}
-
-func (c *Client) Conn(ctx context.Context) *Conn {
- return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Client) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Client) Options() *Options {
- return c.opt
-}
-
-type PoolStats pool.Stats
-
-// PoolStats returns connection pool stats.
-func (c *Client) PoolStats() *PoolStats {
- stats := c.connPool.Stats()
- return (*PoolStats)(stats)
-}
-
-func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Client) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Client) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-// Note that this method does not wait on a response from Redis, so the
-// subscription may not be active immediately. To force the connection to wait,
-// you may call the Receive() method on the returned *PubSub like so:
-//
-// sub := client.Subscribe(queryResp)
-// iface, err := sub.Receive()
-// if err != nil {
-// // handle error
-// }
-//
-// // Should be *Subscription, but others are possible if other actions have been
-// // taken on sub since it was created.
-// switch iface.(type) {
-// case *Subscription:
-// // subscribe succeeded
-// case *Message:
-// // received first message
-// case *Pong:
-// // pong received
-// default:
-// // handle error
-// }
-//
-// ch := sub.Channel()
-func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-//------------------------------------------------------------------------------
-
-type conn struct {
- baseClient
- cmdable
- statefulCmdable
- hooks // TODO: inherit hooks
-}
-
-// Conn represents a single Redis connection rather than a pool of connections.
-// Prefer running commands from Client unless there is a specific need
-// for a continuous single Redis connection.
-type Conn struct {
- *conn
- ctx context.Context
-}
-
-func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
- c := Conn{
- conn: &conn{
- baseClient: baseClient{
- opt: opt,
- connPool: connPool,
- },
- },
- ctx: ctx,
- }
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
- return &c
-}
-
-func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Conn) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Conn) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore
new file mode 100644
index 000000000..64a7cb512
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/.gitignore
@@ -0,0 +1,4 @@
+*.rdb
+testdata/*
+.idea/
+.DS_Store
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.golangci.yml
rename to vendor/github.com/redis/go-redis/v9/.golangci.yml
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.prettierrc.yml
rename to vendor/github.com/redis/go-redis/v9/.prettierrc.yml
diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md
new file mode 100644
index 000000000..297438a9f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md
@@ -0,0 +1,124 @@
+## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
+
+
+### Features
+
+* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
+* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
+* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
+
+
+
+## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
+
+
+### Bug Fixes
+
+* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
+
+
+### Features
+
+* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
+* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
+* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
+
+
+
+## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
+
+### New Features
+
+- feat(scan): scan time.Time sets the default decoding (#2413)
+- Add support for CLUSTER LINKS command (#2504)
+- Add support for acl dryrun command (#2502)
+- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
+- Add support for LCS Command (#2480)
+- Add support for BZMPOP (#2456)
+- Adding support for ZMPOP command (#2408)
+- Add support for LMPOP (#2440)
+- feat: remove pool unused fields (#2438)
+- Expiretime and PExpireTime (#2426)
+- Implement `FUNCTION` group of commands (#2475)
+- feat(zadd): add ZAddLT and ZAddGT (#2429)
+- Add: Support for COMMAND LIST command (#2491)
+- Add support for BLMPOP (#2442)
+- feat: check pipeline.Do to prevent confusion with Exec (#2517)
+- Function stats, function kill, fcall and fcall_ro (#2486)
+- feat: Add support for CLUSTER SHARDS command (#2507)
+- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
+
+### Fixed
+
+- fix: eval api cmd.SetFirstKeyPos (#2501)
+- fix: limit the number of connections created (#2441)
+- fixed #2462: v9 continues to support Dragonfly, whose Hello command returns a "NOAUTH Authentication required" error (#2479)
+- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
+- fix: group lag can be null (#2448)
+
+### Maintenance
+
+- Updating to the latest version of redis (#2508)
+- Allowing for running tests on a port other than the fixed 6380 (#2466)
+- redis 7.0.8 in tests (#2450)
+- docs: Update redisotel example for v9 (#2425)
+- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
+- chore: add Chinese translation (#2436)
+- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
+- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
+- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
+- docs: add instructions for the HSet api (#2503)
+- docs: add reading lag field comment (#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+ contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+ and deadlines. See
+ [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+ `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See the
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Added `redis.HasErrorPrefix` to help working with errors.
+
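+The options and helpers added above can be combined as in this minimal sketch (the URL, key, and
+error prefix are illustrative, not part of the release):
+
+```go
+import (
+	"context"
+
+	"github.com/redis/go-redis/v9"
+)
+
+func exampleV9Additions() error {
+	ctx := context.Background()
+
+	// Parse a cluster URL; more nodes can be supplied via repeated addr parameters.
+	opts, err := redis.ParseClusterURL("redis://user:password@localhost:6789?addr=localhost:6790")
+	if err != nil {
+		return err
+	}
+	// Make network operations respect context deadlines.
+	opts.ContextTimeoutEnabled = true
+
+	rdb := redis.NewClusterClient(opts)
+
+	if err := rdb.Set(ctx, "key", "value", 0).Err(); redis.HasErrorPrefix(err, "READONLY") {
+		// The error message starts with the READONLY prefix; handle it here.
+	}
+	return nil
+}
+```
+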
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+ completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+ [example](example/otel) and
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+ an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+ it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+ reset commands for some reason.
+
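+For the renamed pool options, migration is mostly mechanical; a minimal sketch (values are
+placeholders) of the v9 spelling:
+
+```go
+import (
+	"time"
+
+	"github.com/redis/go-redis/v9"
+)
+
+func newClientV9() *redis.Client {
+	return redis.NewClient(&redis.Options{
+		Addr: "localhost:6379",
+
+		ConnMaxLifetime: 30 * time.Minute, // was MaxConnAge in v8
+		ConnMaxIdleTime: 5 * time.Minute,  // was IdleTimeout in v8
+		MaxIdleConns:    10,               // bounds idle conns now that the reaper is gone
+	})
+}
+```
+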
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usual, added support for more commands and fixed some bugs.
diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/redis/go-redis/v9/LICENSE
similarity index 95%
rename from vendor/github.com/go-redis/redis/v8/LICENSE
rename to vendor/github.com/redis/go-redis/v9/LICENSE
index 298bed9be..f4967dbc5 100644
--- a/vendor/github.com/go-redis/redis/v8/LICENSE
+++ b/vendor/github.com/redis/go-redis/v9/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013 The github.com/go-redis/redis Authors.
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile
new file mode 100644
index 000000000..b59c39554
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/Makefile
@@ -0,0 +1,41 @@
+GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go test in $${dir}"; \
+ (cd "$${dir}" && \
+ go mod tidy -compat=1.18 && \
+ go test && \
+ go test ./... -short -race && \
+ go test ./... -run=NONE -bench=. -benchmem && \
+ env GOOS=linux GOARCH=386 go test && \
+ go vet); \
+ done
+ cd internal/customvet && go build .
+ go vet -vettool ./internal/customvet/customvet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://download.redis.io/releases/redis-7.2-rc3.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
+
+fmt:
+ gofmt -w -s ./
+ goimports -w -local github.com/redis/go-redis ./
+
+go_mod_tidy:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u ./... && \
+ go mod tidy -compat=1.18); \
+ done
diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md
new file mode 100644
index 000000000..3486e8e5a
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/README.md
@@ -0,0 +1,224 @@
+# Redis client for Go
+
+[Build status](https://github.com/redis/go-redis/actions)
+[Go reference](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
+[Documentation](https://redis.uptrace.dev/)
+[Chat](https://discord.gg/rWtp5Aj)
+
+> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
+> use it to monitor applications and set up automatic alerts to receive notifications via email,
+> Slack, Telegram, and others.
+>
+> See the [OpenTelemetry](example/otel) example, which demonstrates how you can use Uptrace to
+> monitor go-redis.
+
+## Documentation
+
+- [English](https://redis.uptrace.dev)
+- [简体中文 (Simplified Chinese)](https://redis.uptrace.dev/zh/)
+
+## Resources
+
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/rWtp5Aj)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key-value NoSQL database that uses RocksDB as its storage engine and is compatible with the Redis protocol.
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling.
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
+- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis/**v9**:
+
+```shell
+go get github.com/redis/go-redis/v9
+```
+
+## Quickstart
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+The example above can be modified to specify the RESP protocol version by setting the `Protocol` field in the `Options` struct:
+
+```go
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+ })
+
+```
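+
+If you want to confirm which protocol version the server actually negotiated, one option (a sketch
+that assumes a Redis 6+ server, where `HELLO` is available) is to send a raw `HELLO` command:
+
+```go
+info, err := rdb.Do(ctx, "hello").Result()
+if err != nil {
+	panic(err)
+}
+fmt.Println(info) // the reply includes a "proto" entry with the negotiated version
+```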
+
+### Connecting via a Redis URL
+
+go-redis also supports connecting via the [Redis URI specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt). The example below demonstrates how the connection can be configured from a single string that adheres to this specification.
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ url := "redis://localhost:6379?password=hello&protocol=3"
+ opts, err := redis.ParseURL(url)
+ if err != nil {
+ panic(err)
+ }
+	rdb := redis.NewClient(opts)
+
+	fmt.Println(rdb.Ping(ctx))
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+	Keys:    []string{"zset1", "zset2"},
+	Weights: []float64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the redis config file are defined in `main_test.go`:
+
+```go
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```shell
+go test
+```
+
+Another option is to run specific tests against an already running Redis server. The example below runs the tests against a server listening on port 9999:
+
+```shell
+REDIS_PORT=9999 go test
+```
+
+## See also
+
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+
+
+
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/redis/go-redis/v9/RELEASING.md
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/RELEASING.md
rename to vendor/github.com/redis/go-redis/v9/RELEASING.md
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/redis/go-redis/v9/cluster.go
similarity index 74%
rename from vendor/github.com/go-redis/redis/v8/cluster.go
rename to vendor/github.com/redis/go-redis/v9/cluster.go
index a54f2f37e..941838dd0 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster.go
+++ b/vendor/github.com/redis/go-redis/v9/cluster.go
@@ -6,17 +6,19 @@ import (
"fmt"
"math"
"net"
+ "net/url"
"runtime"
"sort"
+ "strings"
"sync"
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/rand"
)
var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
@@ -27,6 +29,9 @@ type ClusterOptions struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// NewClient creates a cluster node client with provided name and options.
NewClient func(opt *Options) *Client
@@ -57,6 +62,7 @@ type ClusterOptions struct {
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
@@ -64,20 +70,18 @@ type ClusterOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- // PoolSize applies per cluster node and not for the whole cluster.
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolFIFO bool
+ PoolSize int // applies per cluster node and not for the whole cluster
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
}
@@ -131,13 +135,137 @@ func (opt *ClusterOptions) init() {
}
}
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+// redis://<user>:<password>@<host>:<port>
+// or
+// rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the query parameter "addr" one or more times, e.g.:
+//
+// redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+// or
+// rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+//   additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+//   URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &ClusterOptions{
+// Addrs: ["localhost:6789", "localhost:6790", "localhost:6791"]
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+ o := &ClusterOptions{}
+
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // add base URL to the array of addresses
+ // more addresses may be added through the URL params
+ h, p := getHostPortWithDefaults(u)
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+ // setup username, password, and other configurations
+ o, err = setupClusterConn(u, h, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: host}
+ fallthrough
+ case "redis":
+ o.Username, o.Password = getUserPassword(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ // retrieve the configuration from the query parameters
+ o, err := setupClusterQueryParams(u, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterQueryParams converts query parameters in u to option values in o.
+func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRedirects = q.int("max_redirects")
+ o.ReadOnly = q.bool("read_only")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // addr can be specified as many times as needed
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
func (opt *ClusterOptions) clientOptions() *Options {
- const disableIdleCheck = -1
-
return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@@ -149,13 +277,13 @@ func (opt *ClusterOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: disableIdleCheck,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
// If ClusterSlots is populated, then we probably have an artificial
@@ -204,15 +332,26 @@ func (n *clusterNode) updateLatency() {
const numProbe = 10
var dur uint64
+ successes := 0
for i := 0; i < numProbe; i++ {
time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
start := time.Now()
- n.Client.Ping(context.TODO())
- dur += uint64(time.Since(start) / time.Microsecond)
+ err := n.Client.Ping(context.TODO()).Err()
+ if err == nil {
+ dur += uint64(time.Since(start) / time.Microsecond)
+ successes++
+ }
}
- latency := float64(dur) / float64(numProbe)
+ var latency float64
+ if successes == 0 {
+ // If none of the pings worked, set latency to some arbitrarily high value so this node gets
+ // least priority.
+ latency = float64((1 * time.Minute) / time.Microsecond)
+ } else {
+ latency = float64(dur) / float64(successes)
+ }
atomic.StoreUint32(&n.latency, uint32(latency+0.5))
}
@@ -262,6 +401,7 @@ type clusterNodes struct {
nodes map[string]*clusterNode
activeAddrs []string
closed bool
+ onNewNode []func(rdb *Client)
_generation uint32 // atomic
}
@@ -297,6 +437,12 @@ func (c *clusterNodes) Close() error {
return firstErr
}
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
func (c *clusterNodes) Addrs() ([]string, error) {
var addrs []string
@@ -374,6 +520,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
}
node = newClusterNode(c.opt, addr)
+ for _, fn := range c.onNewNode {
+ fn(node.Client)
+ }
c.addrs = appendIfNotExists(c.addrs, addr)
c.nodes[addr] = node
@@ -683,21 +832,16 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er
//------------------------------------------------------------------------------
-type clusterClient struct {
- opt *ClusterOptions
- nodes *clusterNodes
- state *clusterStateHolder //nolint:structcheck
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
// ClusterClient is a Redis Cluster client representing a pool of zero
// or more underlying connections. It's safe for concurrent use by
// multiple goroutines.
type ClusterClient struct {
- *clusterClient
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
cmdable
- hooks
- ctx context.Context
+ hooksMixin
}
// NewClusterClient returns a Redis Cluster client as described in
@@ -706,38 +850,24 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
opt.init()
c := &ClusterClient{
- clusterClient: &clusterClient{
- opt: opt,
- nodes: newClusterNodes(opt),
- },
- ctx: context.Background(),
+ opt: opt,
+ nodes: newClusterNodes(opt),
}
+
c.state = newClusterStateHolder(c.loadState)
c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
c.cmdable = c.Process
- if opt.IdleCheckFrequency > 0 {
- go c.reaper(opt.IdleCheckFrequency)
- }
+ c.initHooks(hooks{
+ dial: nil,
+ process: c.process,
+ pipeline: c.processPipeline,
+ txPipeline: c.processTxPipeline,
+ })
return c
}
-func (c *ClusterClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
// Options returns read-only Options that were used to create the client.
func (c *ClusterClient) Options() *ClusterOptions {
return c.opt
@@ -757,7 +887,7 @@ func (c *ClusterClient) Close() error {
return c.nodes.Close()
}
-// Do creates a Cmd from the args and processes the cmd.
+// Do creates a Cmd from the args and processes the cmd.
func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@@ -765,13 +895,14 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
- cmdInfo := c.cmdInfo(cmd.Name())
- slot := c.cmdSlot(cmd)
-
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ slot := c.cmdSlot(ctx, cmd)
var node *clusterNode
var ask bool
var lastErr error
@@ -791,12 +922,12 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
}
if ask {
+ ask = false
+
pipe := node.Client.Pipeline()
_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
_ = pipe.Process(ctx, cmd)
_, lastErr = pipe.Exec(ctx)
- _ = pipe.Close()
- ask = false
} else {
lastErr = node.Client.Process(ctx, cmd)
}
@@ -851,6 +982,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
return lastErr
}
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+ c.nodes.OnNewNode(fn)
+}
+
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachMaster(
@@ -1056,30 +1191,9 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
return nil, firstErr
}
-// reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
- ticker := time.NewTicker(idleCheckFrequency)
- defer ticker.Stop()
-
- for range ticker.C {
- nodes, err := c.nodes.All()
- if err != nil {
- break
- }
-
- for _, node := range nodes {
- _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
- }
- }
- }
-}
-
func (c *ClusterClient) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
+ exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
@@ -1090,13 +1204,9 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error)
}
func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
-}
-
-func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
cmdsMap := newCmdsMap()
- err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
- if err != nil {
+
+ if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
setCmdsErr(cmds, err)
return err
}
@@ -1116,18 +1226,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
-
- err := c._processPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
+ c.processPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@@ -1147,9 +1246,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return err
}
- if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
node, err := c.slotReadOnlyNode(state, slot)
if err != nil {
return err
@@ -1160,7 +1259,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
}
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
node, err := state.slotMasterNode(slot)
if err != nil {
return err
@@ -1170,9 +1269,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return nil
}
-func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(cmd.Name())
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
if cmdInfo == nil || !cmdInfo.ReadOnly {
return false
}
@@ -1180,22 +1279,42 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
return true
}
-func (c *ClusterClient) _processPipelineNode(
+func (c *ClusterClient) processPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
-) error {
- return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
+) {
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
- })
- })
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
})
}
@@ -1206,7 +1325,7 @@ func (c *ClusterClient) pipelineReadCmds(
cmds []Cmder,
failedCmds *cmdsMap,
) error {
- for _, cmd := range cmds {
+ for i, cmd := range cmds {
err := cmd.readReply(rd)
cmd.SetErr(err)
@@ -1218,15 +1337,24 @@ func (c *ClusterClient) pipelineReadCmds(
continue
}
- if c.opt.ReadOnly && isLoadingError(err) {
+ if c.opt.ReadOnly {
node.MarkAsFailing()
+ }
+
+ if !isRedisError(err) {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds[i+1:], err)
return err
}
- if isRedisError(err) {
- continue
- }
+ }
+
+ if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
return err
}
+
return nil
}
@@ -1260,8 +1388,10 @@ func (c *ClusterClient) checkMovedErr(
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *ClusterClient) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
}
pipe.init()
return &pipe
@@ -1272,10 +1402,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
-}
-
-func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
// Trim multi .. exec.
cmds = cmds[1 : len(cmds)-1]
@@ -1285,7 +1411,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return err
}
- cmdsMap := c.mapCmdsBySlot(cmds)
+ cmdsMap := c.mapCmdsBySlot(ctx, cmds)
for slot, cmds := range cmdsMap {
node, err := state.slotMasterNode(slot)
if err != nil {
@@ -1309,19 +1435,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
-
- err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
-
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
+ c.processTxPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@@ -1336,44 +1450,69 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return cmdsFirstErr(cmds)
}
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
cmdsMap := make(map[int][]Cmder)
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
cmdsMap[slot] = append(cmdsMap[slot], cmd)
}
return cmdsMap
}
-func (c *ClusterClient) _processTxPipelineNode(
+func (c *ClusterClient) processTxPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processTxPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
- return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := c.txPipelineReadQueued(
+ ctx, rd, statusCmd, trimmedCmds, failedCmds,
+ ); err != nil {
+ setCmdsErr(cmds, err)
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
}
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
+ return err
+ }
- err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
- if err != nil {
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
- }
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- })
+ return pipelineReadCmds(rd, trimmedCmds)
})
}
@@ -1406,12 +1545,7 @@ func (c *ClusterClient) txPipelineReadQueued(
return err
}
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
+ if line[0] != proto.RespArray {
return fmt.Errorf("redis: expected '*', but got line %q", line)
}
@@ -1568,6 +1702,15 @@ func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *Pub
return pubsub
}
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
@@ -1614,26 +1757,27 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo,
return nil, firstErr
}
-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
if err != nil {
+ internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
return nil
}
info := cmdsInfo[name]
if info == nil {
- internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
}
return info
}
-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
args := cmd.Args()
if args[0] == "cluster" && args[1] == "getkeysinslot" {
return args[2].(int)
}
- cmdInfo := c.cmdInfo(cmd.Name())
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
}
@@ -1661,7 +1805,7 @@ func (c *ClusterClient) cmdNode(
return state.slotMasterNode(slot)
}
-func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
if c.opt.RouteByLatency {
return state.slotClosestNode(slot)
}
@@ -1708,6 +1852,13 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client,
return node.Client, err
}
+func (c *ClusterClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go
similarity index 85%
rename from vendor/github.com/go-redis/redis/v8/cluster_commands.go
rename to vendor/github.com/redis/go-redis/v9/cluster_commands.go
index 085bce83d..b13f8e7e9 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go
@@ -8,7 +8,7 @@ import (
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd := NewIntCmd(ctx, "dbsize")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var size int64
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
n, err := master.DBSize(ctx).Result()
@@ -30,8 +30,8 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptLoad(ctx, script).Result()
if err != nil {
@@ -56,7 +56,7 @@ func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCm
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
return shard.ScriptFlush(ctx).Err()
})
@@ -82,8 +82,8 @@ func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *Boo
result[i] = true
}
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptExists(ctx, hashes...).Result()
if err != nil {
diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go
new file mode 100644
index 000000000..1bd4d5db1
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/command.go
@@ -0,0 +1,5235 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type Cmder interface {
+ Name() string
+ FullName() string
+ Args() []interface{}
+ String() string
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha", "eval_ro", "evalsha_ro":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+
+ if info != nil {
+ return int(info.FirstKeyPos)
+ }
+ return 1
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return util.BytesToString(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
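+// Cmd is the generic command used when the reply type is not known up front
+// (for example via Do). Its value is whatever the server returned, with typed
+// accessors for the common conversions.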
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case bool:
+ return val, nil
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results into a destination struct. The keys are matched to
+// the struct fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadSlice()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadInt()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ second, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ microsecond, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val = time.Unix(second, microsecond*1000)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadBool()
+
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ if err == Nil {
+ cmd.val = false
+ err = nil
+ }
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloat()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloat(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValue struct {
+ Key string
+ Value string
+}
+
+type KeyValueSliceCmd struct {
+ baseCmd
+
+ val []KeyValue
+}
+
+var _ Cmder = (*KeyValueSliceCmd)(nil)
+
+func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+ return &KeyValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+ cmd.val = val
+}
+
+func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+ return cmd.val
+}
+
+func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Many commands reply in one of two formats: an array of two-element
+// [key, value] arrays, e.g.
+//  1) 1) "one"
+//     2) (double) 1
+//  2) 1) "two"
+//     2) (double) 2
+// or a flat array of alternating keys and values, e.g.
+//  1) "two"
+//  2) (double) 2
+//  3) "one"
+//  4) (double) 1
+func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n is 0, there is nothing left to read.
+ if n == 0 {
+ cmd.val = make([]KeyValue, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]KeyValue, n)
+ } else {
+ cmd.val = make([]KeyValue, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadBool(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringStringCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*MapStringStringCmd)(nil)
+
+func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+ return &MapStringStringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the struct fields by the `redis:"field"` tag.
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]string, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringIntCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*MapStringIntCmd)(nil)
+
+func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+ return &MapStringIntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *MapStringIntCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringIntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = nn
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]struct{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ if msgs[i], err = readXMessage(rd); err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
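+// readXMessage reads a single stream entry: a two-element array holding the
+// entry ID and its field/value map. A Nil field list is tolerated and yields
+// a message with a nil Values map.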
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return XMessage{}, err
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ v, err := stringInterfaceMapParser(rd)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ }
+
+ return XMessage{
+ ID: id,
+ Values: v,
+ }, nil
+}
+
+func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ m := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
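+// readReply accepts both reply shapes used for XREAD/XREADGROUP: a map keyed
+// by stream name (RESP3) and an array of [name, entries] pairs (RESP2).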
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ var n int
+ if typ == proto.RespMap {
+ n, err = rd.ReadMapLen()
+ } else {
+ n, err = rd.ReadArrayLen()
+ }
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if typ != proto.RespMap {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+ if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var err error
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+ cmd.val = &XPending{}
+
+ if cmd.val.Count, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val.Consumers = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XPendingExt, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+
+ if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ idle, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
+ if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+
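+ // Redis 7 appends a third element listing entry IDs that were removed from
+ // the PEL because they no longer exist in the stream; it is not exposed
+ // here, so discard it.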
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle time.Duration
+ Inactive time.Duration
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for f := 0; f < nn; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ cmd.val[i].Name, err = rd.ReadString()
+ case "pending":
+ cmd.val[i].Pending, err = rd.ReadInt()
+ case "idle":
+ var idle int64
+ idle, err = rd.ReadInt()
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+ case "inactive":
+ var inactive int64
+ inactive, err = rd.ReadInt()
+ cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+ default:
+ return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ group := &cmd.val[i]
+
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for j := 0; j < nn; j++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "consumers":
+ group.Consumers, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "pending":
+ group.Pending, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ case "lag":
+ group.Lag, err = rd.ReadInt()
+
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ if err != nil && err != Nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ FirstEntry XMessage
+ LastEntry XMessage
+ RecordedFirstEntryID string
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = &XInfoStream{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "first-entry":
+ cmd.val.FirstEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "last-entry":
+ cmd.val.LastEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+ RecordedFirstEntryID string
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ ActiveTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ group := XInfoStreamGroup{}
+
+ for j := 0; j < nn; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "lag":
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ group.Lag, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "pel-count":
+ group.PelCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ if err != nil {
+ return nil, err
+ }
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < nn; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.UnixMilli(seen)
+ case "active-time":
+ active, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.ActiveTime = time.UnixMilli(active)
+ case "pel-count":
+ c.PelCount, err = rd.ReadInt()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM FULL reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n is 0, there is nothing left to read.
+ if n == 0 {
+ cmd.val = make([]Z, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
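+ // A nested-array reply holds one [member, score] pair per element (n
+ // entries); a flat reply alternates members and scores (n/2 entries).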
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return err
+ }
+ cmd.val = &ZWithKey{}
+
+ if cmd.val.Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cursor, err := rd.ReadUint()
+ if err != nil {
+ return err
+ }
+ cmd.cursor = cursor
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.page = make([]string, n)
+
+ for i := 0; i < len(cmd.page); i++ {
+ if cmd.page[i], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Iterator creates a new ScanIterator.
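+//
+// An illustrative use, assuming a *redis.Client named rdb and a context ctx:
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		_ = iter.Val() // current key
+//	}
+//	if err := iter.Err(); err != nil {
+//		// handle the error
+//	}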
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+ NetworkingMetadata map[string]string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterSlot, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ n, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n < 2 {
+ return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ }
+
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ // The remaining n-2 elements, after start and end, describe the nodes.
+ nodes := make([]ClusterNode, n-2)
+
+ for j := 0; j < len(nodes); j++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 2 || nn > 4 {
+ return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n)
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if nn >= 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nodes[j].ID = id
+ }
+
+ if nn >= 4 {
+ metadataLength, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ networkingMetadata := make(map[string]string, metadataLength)
+
+ for i := 0; i < metadataLength; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ networkingMetadata[key] = value
+ }
+
+ nodes[j].NetworkingMetadata = networkingMetadata
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+
+ // WithCoord+WithDist+WithGeoHash
+ withLen int
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
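+// geoLocationArgs appends the query options to the command arguments. As a
+// side effect it counts the WITHCOORD/WITHDIST/WITHHASH flags in q.withLen,
+// which readReply later uses to size each reply element.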
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ q.withLen++
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ q.withLen++
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ q.withLen++
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.locations = make([]GeoLocation, n)
+
+ for i := 0; i < len(cmd.locations); i++ {
+ // With no WITH* options, each element is just the member name.
+ if cmd.q.withLen == 0 {
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Each element is an array of the member name plus the withLen requested fields.
+ if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+ return err
+ }
+
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.q.WithDist {
+ if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithGeoHash {
+ if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+type GeoSearchQuery struct {
+ Member string
+
+ // Latitude and Longitude when using FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Distance and unit when using ByRadius option.
+ // Can use m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Height, width and unit when using ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
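+// geoSearchArgs translates a GeoSearchQuery into command arguments: FROMMEMBER
+// or FROMLONLAT for the center, BYRADIUS or BYBOX for the shape (defaulting
+// the unit to km), then the optional sort order and COUNT/ANY.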
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ loc.Longitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*GeoPos, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ err = rd.ReadFixedArrayLen(2)
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return err
+ }
+
+ longitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ latitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
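+
+// A minimal usage sketch, assuming an initialized Client "rdb" and a context
+// "ctx"; Command is part of the Cmdable interface:
+//
+//	infos, err := rdb.Command(ctx).Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	if info, ok := infos["get"]; ok {
+//		_ = info.ReadOnly // true for read-only commands such as GET
+//	}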
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+ const numArgRedis7 = 10
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string]*CommandInfo, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch nn {
+ case numArgRedis5, numArgRedis6, numArgRedis7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+ }
+
+ cmdInfo := &CommandInfo{}
+ if cmdInfo.Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ arity, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Arity = int8(arity)
+
+ flagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Flags = make([]string, flagLen)
+ for f := 0; f < len(cmdInfo.Flags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.Flags[f] = ""
+ case err != nil:
+ return err
+ default:
+ if !cmdInfo.ReadOnly && s == "readonly" {
+ cmdInfo.ReadOnly = true
+ }
+ cmdInfo.Flags[f] = s
+ }
+ }
+
+ firstKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.StepCount = int8(stepCount)
+
+ if nn >= numArgRedis6 {
+ aclFlagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.ACLFlags = make([]string, aclFlagLen)
+ for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.ACLFlags[f] = ""
+ case err != nil:
+ return err
+ default:
+ cmdInfo.ACLFlags[f] = s
+ }
+ }
+ }
+
+ if nn >= numArgRedis7 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ cmd.val[cmdInfo.Name] = cmdInfo
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These are also optional fields emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
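+
+// A minimal usage sketch, assuming an initialized Client "rdb", a context
+// "ctx", and the SlowLogGet method on the client:
+//
+//	logs, err := rdb.SlowLogGet(ctx, 10).Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, entry := range logs {
+//		_ = entry.Duration // execution time of the slow command
+//	}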
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]SlowLog, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 4 {
+ return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+ }
+
+ if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ createdAt, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Time = time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if cmdLen < 1 {
+ return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ }
+
+ cmd.val[i].Args = make([]string, cmdLen)
+ for f := 0; f < len(cmd.val[i].Args); f++ {
+ cmd.val[i].Args[f], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if nn >= 5 {
+ if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ if nn >= 6 {
+ if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceCmd struct {
+ baseCmd
+
+ val map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceCmd)(nil)
+
+func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+ return &MapStringInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *MapStringInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ cmd.val[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ cmd.val[k] = err
+ continue
+ }
+ return err
+ }
+ cmd.val[k] = v
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringStringSliceCmd struct {
+ baseCmd
+
+ val []map[string]string
+}
+
+var _ Cmder = (*MapStringStringSliceCmd)(nil)
+
+func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+ return &MapStringStringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *MapStringStringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]string, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]string, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ v, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceSliceCmd struct {
+ baseCmd
+
+ val []map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
+
+func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+ return &MapStringInterfaceSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *MapStringInterfaceSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]interface{}, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err != Nil {
+ return err
+ }
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValuesCmd struct {
+ baseCmd
+
+ key string
+ val []string
+}
+
+var _ Cmder = (*KeyValuesCmd)(nil)
+
+func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+ return &KeyValuesCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *KeyValuesCmd) Val() (string, []string) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *KeyValuesCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
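+
+// A minimal usage sketch for KeyValuesCmd via LMPop (part of the Cmdable
+// interface), assuming an initialized Client "rdb" and a context "ctx":
+//
+//	key, values, err := rdb.LMPop(ctx, "LEFT", 2, "list1", "list2").Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = key    // the list the elements were popped from
+//	_ = values // the popped elements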
+
+//------------------------------------------------------------------------------
+
+type ZSliceWithKeyCmd struct {
+ baseCmd
+
+ key string
+ val []Z
+}
+
+var _ Cmder = (*ZSliceWithKeyCmd)(nil)
+
+func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+ return &ZSliceWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *ZSliceWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
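+
+// A minimal usage sketch for ZSliceWithKeyCmd via ZMPop (part of the Cmdable
+// interface), assuming an initialized Client "rdb" and a context "ctx":
+//
+//	key, members, err := rdb.ZMPop(ctx, "MIN", 1, "zset1", "zset2").Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = key     // the sorted set the members were popped from
+//	_ = members // []Z with Member and Score populated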
+
+type Function struct {
+ Name string
+ Description string
+ Flags []string
+}
+
+type Library struct {
+ Name string
+ Engine string
+ Functions []Function
+ Code string
+}
+
+type FunctionListCmd struct {
+ baseCmd
+
+ val []Library
+}
+
+var _ Cmder = (*FunctionListCmd)(nil)
+
+func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+ return &FunctionListCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionListCmd) SetVal(val []Library) {
+ cmd.val = val
+}
+
+func (cmd *FunctionListCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionListCmd) Val() []Library {
+ return cmd.val
+}
+
+func (cmd *FunctionListCmd) Result() ([]Library, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionListCmd) First() (*Library, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ if len(cmd.val) > 0 {
+ return &cmd.val[0], nil
+ }
+ return nil, Nil
+}
+
+func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ libraries := make([]Library, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ library := Library{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "library_name":
+ library.Name, err = rd.ReadString()
+ case "engine":
+ library.Engine, err = rd.ReadString()
+ case "functions":
+ library.Functions, err = cmd.readFunctions(rd)
+ case "library_code":
+ library.Code, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ libraries[i] = library
+ }
+ cmd.val = libraries
+ return nil
+}
+
+func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ functions := make([]Function, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function := Function{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ if function.Name, err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ case "description":
+ if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+ return nil, err
+ }
+ case "flags":
+				// The flags are returned as a RESP set; read it like an array.
+ nx, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function.Flags = make([]string, nx)
+ for j := 0; j < nx; j++ {
+ if function.Flags[j], err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+ }
+
+ functions[i] = function
+ }
+ return functions, nil
+}
+
+// FunctionStats contains information about the scripts currently executing on
+// the server and the available engines.
+//   - Engines:
+//     statistics about each engine, such as the number of functions and libraries.
+//   - RunningScript:
+//     the script currently running on the shard we are connected to.
+//     For Redis Enterprise and Redis Cloud, this represents the function with
+//     the longest running time across all running functions on all shards.
+//   - RunningScripts:
+//     all scripts currently running in a Redis Enterprise clustered database.
+//     Only available on Redis Enterprise.
+type FunctionStats struct {
+ Engines []Engine
+ isRunning bool
+ rs RunningScript
+ allrs []RunningScript
+}
+
+func (fs *FunctionStats) Running() bool {
+ return fs.isRunning
+}
+
+func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+ return fs.rs, fs.isRunning
+}
+
+// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise.
+func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+ return fs.allrs
+}
+
+type RunningScript struct {
+ Name string
+ Command []string
+ Duration time.Duration
+}
+
+type Engine struct {
+ Language string
+ LibrariesCount int64
+ FunctionsCount int64
+}
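+
+// A minimal usage sketch, assuming an initialized Client "rdb", a context
+// "ctx", and the FunctionStats method on the client:
+//
+//	stats, err := rdb.FunctionStats(ctx).Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	if rs, ok := stats.RunningScript(); ok {
+//		_ = rs.Duration // how long the script has been running
+//	}
+//	for _, engine := range stats.Engines {
+//		_ = engine.FunctionsCount
+//	}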
+
+type FunctionStatsCmd struct {
+ baseCmd
+ val FunctionStats
+}
+
+var _ Cmder = (*FunctionStatsCmd)(nil)
+
+func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+ return &FunctionStatsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+ cmd.val = val
+}
+
+func (cmd *FunctionStatsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionStatsCmd) Val() FunctionStats {
+ return cmd.val
+}
+
+func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result FunctionStats
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "running_script":
+ result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+ case "engines":
+ result.Engines, err = cmd.readEngines(rd)
+ case "all_running_scripts": // Redis Enterprise only
+ result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+ default:
+ return fmt.Errorf("redis: function stats unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+ err := rd.ReadFixedMapLen(3)
+ if err != nil {
+ if err == Nil {
+ return RunningScript{}, false, nil
+ }
+ return RunningScript{}, false, err
+ }
+
+ var runningScript RunningScript
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+
+ switch key {
+ case "name":
+ runningScript.Name, err = rd.ReadString()
+ case "duration_ms":
+ runningScript.Duration, err = cmd.readDuration(rd)
+ case "command":
+ runningScript.Command, err = cmd.readCommand(rd)
+ default:
+ return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+ }
+
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+ }
+
+ return runningScript, true, nil
+}
+
+func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ engines := make([]Engine, 0, n)
+ for i := 0; i < n; i++ {
+ engine := Engine{}
+ engine.Language, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ err = rd.ReadFixedMapLen(2)
+ if err != nil {
+ return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+ }
+
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ switch key {
+ case "libraries_count":
+ engine.LibrariesCount, err = rd.ReadInt()
+ case "functions_count":
+ engine.FunctionsCount, err = rd.ReadInt()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ engines = append(engines, engine)
+ }
+ return engines, nil
+}
+
+func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+ t, err := rd.ReadInt()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+	n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ command := make([]string, 0, n)
+ for i := 0; i < n; i++ {
+ x, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ command = append(command, x)
+ }
+
+ return command, nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, false, err
+ }
+
+ runningScripts := make([]RunningScript, 0, n)
+ for i := 0; i < n; i++ {
+ rs, _, err := cmd.readRunningScript(rd)
+ if err != nil {
+ return nil, false, err
+ }
+ runningScripts = append(runningScripts, rs)
+ }
+
+ return runningScripts, len(runningScripts) > 0, nil
+}
+
+//------------------------------------------------------------------------------
+
+// LCSQuery is a parameter used for the LCS command
+type LCSQuery struct {
+ Key1 string
+ Key2 string
+ Len bool
+ Idx bool
+ MinMatchLen int
+ WithMatchLen bool
+}
+
+// LCSMatch is the result set of the LCS command.
+type LCSMatch struct {
+ MatchString string
+ Matches []LCSMatchedPosition
+ Len int64
+}
+
+type LCSMatchedPosition struct {
+ Key1 LCSPosition
+ Key2 LCSPosition
+
+	// MatchLen is only set when WithMatchLen is true.
+ MatchLen int64
+}
+
+type LCSPosition struct {
+ Start int64
+ End int64
+}
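+
+// A minimal usage sketch, assuming an initialized Client "rdb" and a context
+// "ctx"; LCS is part of the Cmdable interface:
+//
+//	match, err := rdb.LCS(ctx, &redis.LCSQuery{
+//		Key1:         "key1",
+//		Key2:         "key2",
+//		Idx:          true,
+//		WithMatchLen: true,
+//	}).Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, m := range match.Matches {
+//		_ = m.MatchLen // set because WithMatchLen is true
+//	}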
+
+type LCSCmd struct {
+ baseCmd
+
+	// readType selects how the reply is parsed:
+	// 1: match string
+	// 2: match len
+	// 3: match idx (a full LCSMatch)
+ readType uint8
+ val *LCSMatch
+}
+
+func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+ args := make([]interface{}, 3, 7)
+ args[0] = "lcs"
+ args[1] = q.Key1
+ args[2] = q.Key2
+
+ cmd := &LCSCmd{readType: 1}
+ if q.Len {
+ cmd.readType = 2
+ args = append(args, "len")
+ } else if q.Idx {
+ cmd.readType = 3
+ args = append(args, "idx")
+ if q.MinMatchLen != 0 {
+ args = append(args, "minmatchlen", q.MinMatchLen)
+ }
+ if q.WithMatchLen {
+ args = append(args, "withmatchlen")
+ }
+ }
+ cmd.baseCmd = baseCmd{
+ ctx: ctx,
+ args: args,
+ }
+
+ return cmd
+}
+
+func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+ cmd.val = val
+}
+
+func (cmd *LCSCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *LCSCmd) Val() *LCSMatch {
+ return cmd.val
+}
+
+func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+ lcs := &LCSMatch{}
+ switch cmd.readType {
+ case 1:
+ // match string
+ if lcs.MatchString, err = rd.ReadString(); err != nil {
+ return err
+ }
+ case 2:
+ // match len
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ case 3:
+ // read LCSMatch
+ if err = rd.ReadFixedMapLen(2); err != nil {
+ return err
+ }
+
+ // read matches or len field
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "matches":
+ // read array of matched positions
+ if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+ return err
+ }
+ case "len":
+ // read match length
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ cmd.val = lcs
+ return nil
+}
+
+func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ positions := make([]LCSMatchedPosition, n)
+ for i := 0; i < n; i++ {
+ pn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+ if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+
+ // read match length if WithMatchLen is true
+ if pn > 2 {
+ if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return positions, nil
+}
+
+func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return pos, err
+ }
+ if pos.Start, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+ if pos.End, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+
+ return pos, nil
+}
+
+// ------------------------------------------------------------------------
+
+type KeyFlags struct {
+ Key string
+ Flags []string
+}
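+
+// A minimal usage sketch via CommandGetKeysAndFlags (part of the Cmdable
+// interface), assuming an initialized Client "rdb" and a context "ctx":
+//
+//	keyFlags, err := rdb.CommandGetKeysAndFlags(ctx, "SET", "mykey", "value").Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, kf := range keyFlags {
+//		_ = kf.Key   // the key extracted from the command
+//		_ = kf.Flags // the access flags reported by the server
+//	}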
+
+type KeyFlagsCmd struct {
+ baseCmd
+
+ val []KeyFlags
+}
+
+var _ Cmder = (*KeyFlagsCmd)(nil)
+
+func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+ return &KeyFlagsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+ cmd.val = val
+}
+
+func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+ return cmd.val
+}
+
+func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyFlagsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ cmd.val = make([]KeyFlags, 0)
+ return nil
+ }
+
+ cmd.val = make([]KeyFlags, n)
+
+	for i := 0; i < len(cmd.val); i++ {
+		if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ flagsLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Flags = make([]string, flagsLen)
+
+ for j := 0; j < flagsLen; j++ {
+ if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ---------------------------------------------------------------------------------------------------
+
+type ClusterLink struct {
+ Direction string
+ Node string
+ CreateTime int64
+ Events string
+ SendBufferAllocated int64
+ SendBufferUsed int64
+}
+
+type ClusterLinksCmd struct {
+ baseCmd
+
+ val []ClusterLink
+}
+
+var _ Cmder = (*ClusterLinksCmd)(nil)
+
+func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+ return &ClusterLinksCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+ cmd.val = val
+}
+
+func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+ return cmd.val
+}
+
+func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterLinksCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterLink, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "direction":
+ cmd.val[i].Direction, err = rd.ReadString()
+ case "node":
+ cmd.val[i].Node, err = rd.ReadString()
+ case "create-time":
+ cmd.val[i].CreateTime, err = rd.ReadInt()
+ case "events":
+ cmd.val[i].Events, err = rd.ReadString()
+ case "send-buffer-allocated":
+ cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+ case "send-buffer-used":
+ cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ------------------------------------------------------------------------------------------------------------------
+
+type SlotRange struct {
+ Start int64
+ End int64
+}
+
+type Node struct {
+ ID string
+ Endpoint string
+ IP string
+ Hostname string
+ Port int64
+ TLSPort int64
+ Role string
+ ReplicationOffset int64
+ Health string
+}
+
+type ClusterShard struct {
+ Slots []SlotRange
+ Nodes []Node
+}
+
+type ClusterShardsCmd struct {
+ baseCmd
+
+ val []ClusterShard
+}
+
+var _ Cmder = (*ClusterShardsCmd)(nil)
+
+func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+ return &ClusterShardsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+ cmd.val = val
+}
+
+func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+ return cmd.val
+}
+
+func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterShardsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterShard, n)
+
+ for i := 0; i < n; i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "slots":
+ l, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ for k := 0; k < l; k += 2 {
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+ }
+ case "nodes":
+ nodesLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Nodes = make([]Node, nodesLen)
+ for k := 0; k < nodesLen; k++ {
+ nodeMapLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for l := 0; l < nodeMapLen; l++ {
+ nodeKey, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch nodeKey {
+ case "id":
+ cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+ case "endpoint":
+ cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+ case "ip":
+ cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+ case "hostname":
+ cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+ case "port":
+ cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+ case "tls-port":
+ cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+ case "role":
+ cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+ case "replication-offset":
+ cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+ case "health":
+ cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
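+
+// A minimal usage sketch, assuming an initialized Client "rdb", a context
+// "ctx", and the ClusterShards method on the client:
+//
+//	shards, err := rdb.ClusterShards(ctx).Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, shard := range shards {
+//		_ = shard.Slots // slot ranges owned by the shard
+//		_ = shard.Nodes // nodes serving those slots
+//	}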
+
+// -----------------------------------------
+
+type RankScore struct {
+ Rank int64
+ Score float64
+}
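+
+// A minimal usage sketch via ZRankWithScore (part of the Cmdable interface),
+// assuming an initialized Client "rdb" and a context "ctx":
+//
+//	rs, err := rdb.ZRankWithScore(ctx, "zset", "member").Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = rs.Rank
+//	_ = rs.Score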
+
+type RankWithScoreCmd struct {
+ baseCmd
+
+ val RankScore
+}
+
+var _ Cmder = (*RankWithScoreCmd)(nil)
+
+func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+ return &RankWithScoreCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+ cmd.val = val
+}
+
+func (cmd *RankWithScoreCmd) Val() RankScore {
+ return cmd.val
+}
+
+func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *RankWithScoreCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ rank, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = RankScore{Rank: rank, Score: score}
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------------------------
+
+// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0)
+type ClientFlags uint64
+
+const (
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+	ClientPrePSync ClientFlags = 1 << 16 /* Instance doesn't understand PSYNC. */
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+	ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a write handler is not yet installed. */
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+	ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling
+	a command. Usually this is set only during call(); however, blocked
+	clients might keep this flag until they try to reprocess the command. */
+	ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully parsed command ready for execution. */
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+	ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands and writing the entire reply. */
+	ClientDenyBlocking ClientFlags = 1 << 41 /* Indicates that the client should not be blocked. Currently turned on inside MULTI, Lua, RM_Call, and the AOF client. */
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+)
+
+// ClientInfo describes a redis-server client connection (the output of CLIENT INFO), not a go-redis *Client.
+type ClientInfo struct {
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+ Addr string // address/port of the client
+ LAddr string // address/port of local address client connected to (bind address)
+ FD int64 // file descriptor corresponding to the socket
+ Name string // the name set by the client with CLIENT SETNAME
+ Age time.Duration // total duration of the connection in seconds
+ Idle time.Duration // idle time of the connection in seconds
+ Flags ClientFlags // client flags (see below)
+ DB int // current database ID
+ Sub int // number of channel subscriptions
+ PSub int // number of pattern matching subscriptions
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+ Multi int // number of commands in a MULTI/EXEC context
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+ MultiMem int // redis version 7.0, memory is used up by buffered multi commands
+ BufferSize int // rbs, usable size of buffer
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+ OutputBufferLength int // obl, output buffer length
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+ OutputMemory int // omem, output buffer memory usage
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+ Events string // file descriptor events (see below)
+ LastCmd string // cmd, last command played
+ User string // the authenticated username of the client
+ Redir int64 // client id of current client tracking redirection
+ Resp int // redis version 7.0, client RESP protocol version
+ LibName string // redis version 7.2, client library name
+ LibVer string // redis version 7.2, client library version
+}
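+
+// A minimal usage sketch, assuming an initialized Client "rdb", a context
+// "ctx", and the ClientInfo method on the client:
+//
+//	info, err := rdb.ClientInfo(ctx).Result()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = info.LibName // client library name, redis-server >= 7.2
+//	_ = info.Resp    // RESP protocol version, redis-server >= 7.0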
+
+type ClientInfoCmd struct {
+ baseCmd
+
+ val *ClientInfo
+}
+
+var _ Cmder = (*ClientInfoCmd)(nil)
+
+func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+ return &ClientInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+ cmd.val = val
+}
+
+func (cmd *ClientInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClientInfoCmd) Val() *ClientInfo {
+ return cmd.val
+}
+
+func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ // sds o = catClientInfoString(sdsempty(), c);
+ // o = sdscatlen(o,"\n",1);
+ // addReplyVerbatim(c,o,sdslen(o),"txt");
+ // sdsfree(o);
+ cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+ return err
+}
+
+// parseClientInfo parses the CLIENT INFO text format by hand because
+// fmt.Sscanf() cannot handle empty values.
+func parseClientInfo(txt string) (info *ClientInfo, err error) {
+ info = &ClientInfo{}
+ for _, s := range strings.Split(txt, " ") {
+ kv := strings.Split(s, "=")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+ }
+ key, val := kv[0], kv[1]
+
+ switch key {
+ case "id":
+ info.ID, err = strconv.ParseInt(val, 10, 64)
+ case "addr":
+ info.Addr = val
+ case "laddr":
+ info.LAddr = val
+ case "fd":
+ info.FD, err = strconv.ParseInt(val, 10, 64)
+ case "name":
+ info.Name = val
+ case "age":
+ var age int
+ if age, err = strconv.Atoi(val); err == nil {
+ info.Age = time.Duration(age) * time.Second
+ }
+ case "idle":
+ var idle int
+ if idle, err = strconv.Atoi(val); err == nil {
+ info.Idle = time.Duration(idle) * time.Second
+ }
+ case "flags":
+ if val == "N" {
+ break
+ }
+
+ for i := 0; i < len(val); i++ {
+ switch val[i] {
+ case 'S':
+ info.Flags |= ClientSlave
+ case 'O':
+ info.Flags |= ClientSlave | ClientMonitor
+ case 'M':
+ info.Flags |= ClientMaster
+ case 'P':
+ info.Flags |= ClientPubSub
+ case 'x':
+ info.Flags |= ClientMulti
+ case 'b':
+ info.Flags |= ClientBlocked
+ case 't':
+ info.Flags |= ClientTracking
+ case 'R':
+ info.Flags |= ClientTrackingBrokenRedir
+ case 'B':
+ info.Flags |= ClientTrackingBCAST
+ case 'd':
+ info.Flags |= ClientDirtyCAS
+ case 'c':
+ info.Flags |= ClientCloseAfterCommand
+ case 'u':
+ info.Flags |= ClientUnBlocked
+ case 'A':
+ info.Flags |= ClientCloseASAP
+ case 'U':
+ info.Flags |= ClientUnixSocket
+ case 'r':
+ info.Flags |= ClientReadOnly
+ case 'e':
+ info.Flags |= ClientNoEvict
+ case 'T':
+ info.Flags |= ClientNoTouch
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+ }
+ }
+ case "db":
+ info.DB, err = strconv.Atoi(val)
+ case "sub":
+ info.Sub, err = strconv.Atoi(val)
+ case "psub":
+ info.PSub, err = strconv.Atoi(val)
+ case "ssub":
+ info.SSub, err = strconv.Atoi(val)
+ case "multi":
+ info.Multi, err = strconv.Atoi(val)
+ case "qbuf":
+ info.QueryBuf, err = strconv.Atoi(val)
+ case "qbuf-free":
+ info.QueryBufFree, err = strconv.Atoi(val)
+ case "argv-mem":
+ info.ArgvMem, err = strconv.Atoi(val)
+ case "multi-mem":
+ info.MultiMem, err = strconv.Atoi(val)
+ case "rbs":
+ info.BufferSize, err = strconv.Atoi(val)
+ case "rbp":
+ info.BufferPeak, err = strconv.Atoi(val)
+ case "obl":
+ info.OutputBufferLength, err = strconv.Atoi(val)
+ case "oll":
+ info.OutputListLength, err = strconv.Atoi(val)
+ case "omem":
+ info.OutputMemory, err = strconv.Atoi(val)
+ case "tot-mem":
+ info.TotalMemory, err = strconv.Atoi(val)
+ case "events":
+ info.Events = val
+ case "cmd":
+ info.LastCmd = val
+ case "user":
+ info.User = val
+ case "redir":
+ info.Redir, err = strconv.ParseInt(val, 10, 64)
+ case "resp":
+ info.Resp, err = strconv.Atoi(val)
+ case "lib-name":
+ info.LibName = val
+ case "lib-ver":
+ info.LibVer = val
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return info, nil
+}
+
+// -------------------------------------------
+
+type ACLLogEntry struct {
+ Count int64
+ Reason string
+ Context string
+ Object string
+ Username string
+ AgeSeconds float64
+ ClientInfo *ClientInfo
+ EntryID int64
+ TimestampCreated int64
+ TimestampLastUpdated int64
+}
+
+type ACLLogCmd struct {
+ baseCmd
+
+ val []*ACLLogEntry
+}
+
+var _ Cmder = (*ACLLogCmd)(nil)
+
+func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+ return &ACLLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+ cmd.val = val
+}
+
+func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+ return cmd.val
+}
+
+func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ACLLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]*ACLLogEntry, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i] = &ACLLogEntry{}
+ entry := cmd.val[i]
+ respLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for j := 0; j < respLen; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "count":
+ entry.Count, err = rd.ReadInt()
+ case "reason":
+ entry.Reason, err = rd.ReadString()
+ case "context":
+ entry.Context, err = rd.ReadString()
+ case "object":
+ entry.Object, err = rd.ReadString()
+ case "username":
+ entry.Username, err = rd.ReadString()
+ case "age-seconds":
+ entry.AgeSeconds, err = rd.ReadFloat()
+ case "client-info":
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+ if err != nil {
+ return err
+ }
+ case "entry-id":
+ entry.EntryID, err = rd.ReadInt()
+ case "timestamp-created":
+ entry.TimestampCreated, err = rd.ReadInt()
+ case "timestamp-last-updated":
+ entry.TimestampLastUpdated, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go
similarity index 79%
rename from vendor/github.com/go-redis/redis/v8/commands.go
rename to vendor/github.com/redis/go-redis/v9/commands.go
index bbfe089df..07c8e2c88 100644
--- a/vendor/github.com/go-redis/redis/v8/commands.go
+++ b/vendor/github.com/redis/go-redis/v9/commands.go
@@ -2,18 +2,22 @@ package redis
import (
"context"
+ "encoding"
"errors"
"io"
+ "net"
+ "reflect"
+ "strings"
"time"
- "github.com/go-redis/redis/v8/internal"
+ "github.com/redis/go-redis/v9/internal"
)
// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
// otherwise you will receive an error: (error) ERR syntax error.
// For example:
//
-// rdb.Set(ctx, key, value, redis.KeepTTL)
+// rdb.Set(ctx, key, value, redis.KeepTTL)
const KeepTTL = -1
func usePrecise(dur time.Duration) bool {
@@ -73,11 +77,84 @@ func appendArg(dst []interface{}, arg interface{}) []interface{} {
dst = append(dst, k, v)
}
return dst
+ case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+ return append(dst, arg)
default:
+		// Flatten struct arguments using their `redis` field tags.
+ v := reflect.ValueOf(arg)
+ if v.Type().Kind() == reflect.Ptr {
+ if v.IsNil() {
+				// A nil pointer is not a valid argument; skip it.
+ return dst
+ }
+ v = v.Elem()
+ }
+
+ if v.Type().Kind() == reflect.Struct {
+ return appendStructField(dst, v)
+ }
+
return append(dst, arg)
}
}
+// appendStructField appends the fields and values of the struct v to dst and returns the appended dst.
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ tag := typ.Field(i).Tag.Get("redis")
+ if tag == "" || tag == "-" {
+ continue
+ }
+ name, opt, _ := strings.Cut(tag, ",")
+ if name == "" {
+ continue
+ }
+
+ field := v.Field(i)
+
+		// Skip fields that are empty and tagged with omitempty.
+ if omitEmpty(opt) && isEmptyValue(field) {
+ continue
+ }
+
+ if field.CanInterface() {
+ dst = append(dst, name, field.Interface())
+ }
+ }
+
+ return dst
+}
+
+func omitEmpty(opt string) bool {
+ for opt != "" {
+ var name string
+ name, opt, _ = strings.Cut(opt, ",")
+ if name == "omitempty" {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Pointer:
+ return v.IsNil()
+ }
+ return false
+}
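+
+// A minimal sketch of how struct arguments are flattened by appendStructField,
+// assuming an initialized Client "rdb", a context "ctx", and that commands such
+// as HSet route struct values through appendArg:
+//
+//	type Profile struct {
+//		Name  string `redis:"name"`
+//		Email string `redis:"email,omitempty"`
+//	}
+//
+//	// With Email left empty, only the "name" field/value pair is sent
+//	// because of the omitempty option.
+//	rdb.HSet(ctx, "profile:1", Profile{Name: "alice"})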
+
type Cmdable interface {
Pipeline() Pipeliner
Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
@@ -86,6 +163,9 @@ type Cmdable interface {
TxPipeline() Pipeliner
Command(ctx context.Context) *CommandsInfoCmd
+ CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+ CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+ CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
ClientGetName(ctx context.Context) *StringCmd
Echo(ctx context.Context, message interface{}) *StringCmd
Ping(ctx context.Context) *StatusCmd
@@ -96,6 +176,7 @@ type Cmdable interface {
Exists(ctx context.Context, keys ...string) *IntCmd
Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ ExpireTime(ctx context.Context, key string) *DurationCmd
ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
@@ -109,6 +190,7 @@ type Cmdable interface {
Persist(ctx context.Context, key string) *BoolCmd
PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PExpireTime(ctx context.Context, key string) *DurationCmd
PTTL(ctx context.Context, key string) *DurationCmd
RandomKey(ctx context.Context) *StringCmd
Rename(ctx context.Context, key, newkey string) *StatusCmd
@@ -116,6 +198,7 @@ type Cmdable interface {
Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd
SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
Touch(ctx context.Context, keys ...string) *IntCmd
@@ -137,8 +220,7 @@ type Cmdable interface {
MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
- // TODO: rename to SetEx
- SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
@@ -153,6 +235,7 @@ type Cmdable interface {
BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd
BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
@@ -164,7 +247,7 @@ type Cmdable interface {
HDel(ctx context.Context, key string, fields ...string) *IntCmd
HExists(ctx context.Context, key, field string) *BoolCmd
HGet(ctx context.Context, key, field string) *StringCmd
- HGetAll(ctx context.Context, key string) *StringStringMapCmd
+ HGetAll(ctx context.Context, key string) *MapStringStringCmd
HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
HKeys(ctx context.Context, key string) *StringSliceCmd
@@ -174,16 +257,20 @@ type Cmdable interface {
HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
HVals(ctx context.Context, key string) *StringSliceCmd
- HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
+ HRandField(ctx context.Context, key string, count int) *StringSliceCmd
+ HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd
BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd
BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LCS(ctx context.Context, q *LCSQuery) *LCSCmd
LIndex(ctx context.Context, key string, index int64) *StringCmd
LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
LLen(ctx context.Context, key string) *IntCmd
+ LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd
LPop(ctx context.Context, key string) *StringCmd
LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
@@ -207,6 +294,7 @@ type Cmdable interface {
SDiff(ctx context.Context, keys ...string) *StringSliceCmd
SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
@@ -244,10 +332,6 @@ type Cmdable interface {
XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
-
- // TODO: XTrim and XTrimApprox remove in v9.
- XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
@@ -259,35 +343,24 @@ type Cmdable interface {
BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd
- // TODO: remove
- // ZAddCh
- // ZIncr
- // ZAddNXCh
- // ZAddXXCh
- // ZIncrNX
- // ZIncrXX
- // in v9.
- // use ZAddArgs and ZAddArgsIncr.
-
- ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAdd(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd
ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
- ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
ZCard(ctx context.Context, key string) *IntCmd
ZCount(ctx context.Context, key, min, max string) *IntCmd
ZLexCount(ctx context.Context, key, min, max string) *IntCmd
ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd
ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
@@ -300,6 +373,7 @@ type Cmdable interface {
ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
@@ -310,11 +384,13 @@ type Cmdable interface {
ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
ZScore(ctx context.Context, key, member string) *FloatCmd
ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd
+ ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd
ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
- ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
@@ -328,9 +404,13 @@ type Cmdable interface {
ClientKill(ctx context.Context, ipPort string) *StatusCmd
ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
ClientList(ctx context.Context) *StringCmd
+ ClientInfo(ctx context.Context) *ClientInfoCmd
ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientUnpause(ctx context.Context) *BoolCmd
ClientID(ctx context.Context) *IntCmd
- ConfigGet(ctx context.Context, parameter string) *SliceCmd
+ ClientUnblock(ctx context.Context, id int64) *IntCmd
+ ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
ConfigResetStat(ctx context.Context) *StatusCmd
ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
ConfigRewrite(ctx context.Context) *StatusCmd
@@ -346,6 +426,7 @@ type Cmdable interface {
ShutdownSave(ctx context.Context) *StatusCmd
ShutdownNoSave(ctx context.Context) *StatusCmd
SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
Time(ctx context.Context) *TimeCmd
DebugObject(ctx context.Context, key string) *StringCmd
ReadOnly(ctx context.Context) *StatusCmd
@@ -354,17 +435,39 @@ type Cmdable interface {
Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
ScriptFlush(ctx context.Context) *StatusCmd
ScriptKill(ctx context.Context) *StatusCmd
ScriptLoad(ctx context.Context, script string) *StringCmd
- Publish(ctx context.Context, channel string, message interface{}) *IntCmd
- PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
- PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
- PubSubNumPat(ctx context.Context) *IntCmd
+ FunctionLoad(ctx context.Context, code string) *StringCmd
+ FunctionLoadReplace(ctx context.Context, code string) *StringCmd
+ FunctionDelete(ctx context.Context, libName string) *StringCmd
+ FunctionFlush(ctx context.Context) *StringCmd
+ FunctionKill(ctx context.Context) *StringCmd
+ FunctionFlushAsync(ctx context.Context) *StringCmd
+ FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd
+ FunctionDump(ctx context.Context) *StringCmd
+ FunctionRestore(ctx context.Context, libDump string) *StringCmd
+ FunctionStats(ctx context.Context) *FunctionStatsCmd
+ FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ SPublish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+ PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+
+ ClusterMyShardID(ctx context.Context) *StringCmd
ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterShards(ctx context.Context) *ClusterShardsCmd
+ ClusterLinks(ctx context.Context) *ClusterLinksCmd
ClusterNodes(ctx context.Context) *StringCmd
ClusterMeet(ctx context.Context, host, port string) *StatusCmd
ClusterForget(ctx context.Context, nodeID string) *StatusCmd
@@ -395,6 +498,15 @@ type Cmdable interface {
GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+
+ ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+ ACLLog(ctx context.Context, count int64) *ACLLogCmd
+ ACLLogReset(ctx context.Context) *StatusCmd
+
+ ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
+
+ gearsCmdable
+ probabilisticCmdable
}
type StatefulCmdable interface {
@@ -404,6 +516,7 @@ type StatefulCmdable interface {
Select(ctx context.Context, index int) *StatusCmd
SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
ClientSetName(ctx context.Context, name string) *BoolCmd
+ Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
}
var (
@@ -460,6 +573,26 @@ func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCm
return cmd
}
+// Hello sets the RESP protocol version used by the connection, optionally authenticating and naming it.
+func (c statefulCmdable) Hello(ctx context.Context,
+ ver int, username, password, clientName string) *MapStringInterfaceCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "hello", ver)
+ if password != "" {
+ if username != "" {
+ args = append(args, "auth", username, password)
+ } else {
+ args = append(args, "auth", "default", password)
+ }
+ }
+ if clientName != "" {
+ args = append(args, "setname", clientName)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
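As a usage note, Hello lives on StatefulCmdable, so it is typically issued on a dedicated connection rather than the pooled client. A minimal sketch, assuming a hypothetical `rdb` client, `ctx`, and placeholder credentials (none of these are part of this diff):

    conn := rdb.Conn() // dedicated connection; HELLO only affects this connection
    defer conn.Close()
    info, err := conn.Hello(ctx, 3, "default", "secret", "my-client").Result()
    if err != nil {
        // handle error
    }
    _ = info["proto"] // server-reported protocol version, e.g. int64(3)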
//------------------------------------------------------------------------------
func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
@@ -468,6 +601,50 @@ func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
return cmd
}
+// FilterBy is used for the `CommandList` command parameter.
+type FilterBy struct {
+ Module string
+ ACLCat string
+ Pattern string
+}
+
+func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+ args := make([]interface{}, 0, 5)
+ args = append(args, "command", "list")
+ if filter != nil {
+ if filter.Module != "" {
+ args = append(args, "filterby", "module", filter.Module)
+ } else if filter.ACLCat != "" {
+ args = append(args, "filterby", "aclcat", filter.ACLCat)
+ } else if filter.Pattern != "" {
+ args = append(args, "filterby", "pattern", filter.Pattern)
+ }
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
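A short sketch of the new FilterBy parameter (hypothetical `rdb`/`ctx`). Note that, per the code above, only the first non-empty field is applied, checked in Module, ACLCat, Pattern order:

    // List only commands in the "read" ACL category.
    names, err := rdb.CommandList(ctx, &redis.FilterBy{ACLCat: "read"}).Result()
    // Passing nil lists every command name.
    all, err2 := rdb.CommandList(ctx, nil).Result()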
+func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeys"
+ copy(args[2:], commands)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeysandflags"
+ copy(args[2:], commands)
+ cmd := NewKeyFlagsCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
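For illustration, COMMAND GETKEYS extracts the key names from a full command line; a hedged sketch with placeholder keys:

    keys, err := rdb.CommandGetKeys(ctx, "EVAL", "return 1", "2", "k1", "k2").Result()
    // keys == []string{"k1", "k2"}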
// ClientGetName returns the name of the connection.
func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
cmd := NewStringCmd(ctx, "client", "getname")
@@ -572,6 +749,12 @@ func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCm
return cmd
}
+func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "expiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
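Worth noting: EXPIRETIME reports the absolute Unix expiry time in seconds (not a remaining TTL), and this wrapper returns it as a time.Duration. A sketch of converting it back to wall-clock time (placeholder client and key):

    d, err := rdb.ExpireTime(ctx, "session:42").Result()
    if err == nil {
        expiresAt := time.Unix(int64(d/time.Second), 0) // absolute expiry as time.Time
        _ = expiresAt
    }
    // PExpireTime below behaves the same way with millisecond precision.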
func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
cmd := NewStringSliceCmd(ctx, "keys", pattern)
_ = c(ctx, cmd)
@@ -640,6 +823,12 @@ func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolC
return cmd
}
+func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
_ = c(ctx, cmd)
@@ -697,8 +886,9 @@ type Sort struct {
Alpha bool
}
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
+func (sort *Sort) args(command, key string) []interface{} {
+ args := []interface{}{command, key}
+
if sort.By != "" {
args = append(args, "by", sort.By)
}
@@ -717,14 +907,20 @@ func (sort *Sort) args(key string) []interface{} {
return args
}
+func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+ cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...)
_ = c(ctx, cmd)
return cmd
}
func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
- args := sort.args(key)
+ args := sort.args("sort", key)
if store != "" {
args = append(args, "store", store)
}
@@ -734,7 +930,7 @@ func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *
}
func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
- cmd := NewSliceCmd(ctx, sort.args(key)...)
+ cmd := NewSliceCmd(ctx, sort.args("sort", key)...)
_ = c(ctx, cmd)
return cmd
}
@@ -859,6 +1055,7 @@ func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
// - MSet("key1", "value1", "key2", "value2")
// - MSet([]string{"key1", "value1", "key2", "value2"})
// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSet(struct); for struct types, see the HSet description.
func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
args := make([]interface{}, 1, 1+len(values))
args[0] = "mset"
@@ -872,6 +1069,7 @@ func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
// - MSetNX("key1", "value1", "key2", "value2")
// - MSetNX([]string{"key1", "value1", "key2", "value2"})
// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSetNX(struct); for struct types, see the HSet description.
func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
args := make([]interface{}, 1, 1+len(values))
args[0] = "msetnx"
@@ -882,7 +1080,7 @@ func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
}
// Set Redis `SET key value [expiration]` command.
-// Use expiration for `SETEX`-like behavior.
+// Use expiration for `SETEx`-like behavior.
//
// Zero expiration means the key has no expiration time.
// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
@@ -958,8 +1156,8 @@ func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a S
return cmd
}
-// SetEX Redis `SETEX key expiration value` command.
-func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+// SetEx Redis `SETEx key expiration value` command.
+func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
_ = c(ctx, cmd)
return cmd
@@ -1103,6 +1301,8 @@ func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntC
return c.bitOp(ctx, "not", destKey, key)
}
+// BitPos uses the pre-Redis 7.0 form of the command: bitpos key bit start end.
+// If you need the `byte | bit` span parameter, use `BitPosSpan`.
func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
args := make([]interface{}, 3+len(pos))
args[0] = "bitpos"
@@ -1123,6 +1323,18 @@ func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64
return cmd
}
+// BitPosSpan supports the `byte | bit` parameter introduced in Redis 7.0.
+// By default the bitpos command interprets the `start-end` range in bytes,
+// i.e. it counts bytes from start to end; set "span" to choose how the
+// range is interpreted:
+// span = "bit", cmd: bitpos key bit start end bit
+// span = "byte", cmd: bitpos key bit start end byte
+func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd {
+ cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
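A small sketch of the two span modes (requires redis-server >= 7.0; client, key and offsets are placeholders):

    // First set bit, scanning bit offsets 4 through 20.
    p1, err := rdb.BitPosSpan(ctx, "mykey", 1, 4, 20, "bit").Result()
    // First clear bit, scanning byte offsets 0 through 2 (the pre-7.0 default unit).
    p2, err2 := rdb.BitPosSpan(ctx, "mykey", 0, 0, 2, "byte").Result()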
func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
a := make([]interface{}, 0, 2+len(args))
a = append(a, "bitfield")
@@ -1229,8 +1441,8 @@ func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
return cmd
}
-func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+func (c cmdable) HGetAll(ctx context.Context, key string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "hgetall", key)
_ = c(ctx, cmd)
return cmd
}
@@ -1274,11 +1486,29 @@ func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *Slice
}
// HSet accepts values in following formats:
+//
// - HSet("myhash", "key1", "value1", "key2", "value2")
+//
// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//
// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
//
-// Note that it requires Redis v4 for multiple field/value pairs support.
+// HSet also accepts a struct tagged with "redis":
+// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` }
+//
+// - HSet("myhash", MyHash{"value1", "value2"}) Warn: requires redis-server >= 4.0
+//
+// The struct may also be passed as a pointer; only fields carrying a redis tag are written.
+// To skip a field, tag it with `redis:"-"` or simply omit the redis tag.
+// Only simple field types are supported:
+// string, int/uint(8,16,32,64), float(32,64), time.Time (as RFC3339Nano), time.Duration (as nanoseconds);
+// for more complex or custom types, implement the encoding.BinaryMarshaler interface.
+//
+// Note that older Redis servers (redis-server < 4.0) only accept a single field-value pair per HSET.
+// redis-docs: https://redis.io/commands/hset (Starting with Redis version 4.0.0: Accepts multiple field and value arguments.)
+// If you pass a struct with more than one tagged field to such a server,
+// you will receive an error similar to "ERR wrong number of arguments"; use HMSet instead.
func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
args := make([]interface{}, 2, 2+len(values))
args[0] = "hset"
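A sketch of the struct form described in the doc comment above; the type, field names and tags are illustrative only:

    type Session struct {
        UserID    int           `redis:"user_id"`
        Token     string        `redis:"token"`
        ExpiresIn time.Duration `redis:"expires_in"` // stored as nanoseconds
        Debug     string        `redis:"-"`          // never written to the hash
    }
    err := rdb.HSet(ctx, "session:42", Session{UserID: 7, Token: "abc", ExpiresIn: time.Hour}).Err()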
@@ -1313,16 +1543,15 @@ func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
}
// HRandField redis-server version >= 6.2.0.
-func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
+func (c cmdable) HRandField(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hrandfield", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "hrandfield", key, count)
- if withValues {
- args = append(args, "withvalues")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
+// HRandFieldWithValues redis-server version >= 6.2.0.
+func (c cmdable) HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd {
+ cmd := NewKeyValueSliceCmd(ctx, "hrandfield", key, count, "withvalues")
_ = c(ctx, cmd)
return cmd
}
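The old withValues flag is now a separate method; a quick sketch of both calls (placeholder client and hash):

    fields, err := rdb.HRandField(ctx, "myhash", 2).Result()           // e.g. []string{"key1", "key2"}
    pairs, err2 := rdb.HRandFieldWithValues(ctx, "myhash", 2).Result() // []redis.KeyValue{{Key: "key1", Value: "v1"}, ...}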
@@ -1342,6 +1571,21 @@ func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...strin
return cmd
}
+func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "blmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
args := make([]interface{}, 1+len(keys)+1)
args[0] = "brpop"
@@ -1368,12 +1612,34 @@ func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, tim
return cmd
}
+func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd {
+ cmd := NewLCSCmd(ctx, q)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
cmd := NewStringCmd(ctx, "lindex", key, index)
_ = c(ctx, cmd)
return cmd
}
+// LMPop pops one or more elements from the first non-empty list among the provided key names.
+// direction: left or right, count: > 0
+// example: client.LMPop(ctx, "left", 3, "key1", "key2")
+func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "lmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
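Sketch of the new multi-key pop (LMPOP requires redis-server >= 7.0); BLMPop above is the same call with a leading timeout. Client and key names are placeholders:

    key, vals, err := rdb.LMPop(ctx, "left", 3, "queue:a", "queue:b").Result()
    // key is the list that was popped from; vals holds up to 3 elements.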
func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
_ = c(ctx, cmd)
@@ -1602,6 +1868,22 @@ func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
return cmd
}
+func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "sintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
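A minimal sketch; limit caps how far the server counts, and 0 means no limit (placeholder client and sets):

    n, err := rdb.SInterCard(ctx, 100, "set:a", "set:b").Result()
    // n <= 100; pass limit 0 to count the full intersection.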
func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
args := make([]interface{}, 2+len(keys))
args[0] = "sinterstore"
@@ -1725,11 +2007,7 @@ type XAddArgs struct {
Stream string
NoMkStream bool
MaxLen int64 // MAXLEN N
-
- // Deprecated: use MaxLen+Approx, remove in v9.
- MaxLenApprox int64 // MAXLEN ~ N
-
- MinID string
+ MinID string
// Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
Approx bool
Limit int64
@@ -1737,8 +2015,6 @@ type XAddArgs struct {
Values interface{}
}
-// XAdd a.Limit has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
args := make([]interface{}, 0, 11)
args = append(args, "xadd", a.Stream)
@@ -1752,9 +2028,6 @@ func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
} else {
args = append(args, "maxlen", a.MaxLen)
}
- case a.MaxLenApprox > 0:
- // TODO remove in v9.
- args = append(args, "maxlen", "~", a.MaxLenApprox)
case a.MinID != "":
if a.Approx {
args = append(args, "minid", "~", a.MinID)
@@ -2049,8 +2322,10 @@ func xClaimArgs(a *XClaimArgs) []interface{} {
// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default).
// example:
-// XTRIM key MAXLEN/MINID threshold LIMIT limit.
-// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
+// XTRIM key MAXLEN/MINID threshold LIMIT limit.
+// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
// If the redis-server version is lower than 6.2, set limit to 0.
func (c cmdable) xTrim(
ctx context.Context, key, strategy string,
@@ -2070,38 +2345,20 @@ func (c cmdable) xTrim(
return cmd
}
-// Deprecated: use XTrimMaxLen, remove in v9.
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// Deprecated: use XTrimMaxLenApprox, remove in v9.
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
-}
-
// XTrimMaxLen No `~` rules are used, `limit` cannot be used.
// cmd: XTRIM key MAXLEN maxLen
func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
}
-// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
}
-// XTrimMinID No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MINID minID
func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
return c.xTrim(ctx, key, "minid", false, minID, 0)
}
-// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MINID ~ minID LIMIT limit
func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
return c.xTrim(ctx, key, "minid", true, minID, limit)
}
@@ -2214,6 +2471,26 @@ func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...st
return cmd
}
+// BZMPop is the blocking variant of ZMPOP.
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP.
+// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses.
+// A timeout of zero can be used to block indefinitely.
+// example: client.BZMPop(ctx, 0, "max", 1, "set")
+func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "bzmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
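Sketch of the blocking call (ZMPOP/BZMPOP require redis-server >= 7.0); client and keys are placeholders:

    key, members, err := rdb.BZMPop(ctx, 5*time.Second, "min", 2, "zset:a", "zset:b").Result()
    // key is the sorted set popped from; members is up to 2 redis.Z values (lowest scores first).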
// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
type ZAddArgs struct {
NX bool
@@ -2266,116 +2543,42 @@ func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *F
return cmd
}
-// TODO: Compatible with v8 api, will be removed in v9.
-func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
- args.Members = make([]Z, len(members))
- for i, m := range members {
- args.Members[i] = *m
- }
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ Members: members,
+ })
}
-// ZAdd Redis `ZADD key score member [score member ...]` command.
-func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{}, members...)
+// ZAddLT Redis `ZADD key LT score member [score member ...]` command.
+func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ LT: true,
+ Members: members,
+ })
+}
+
+// ZAddGT Redis `ZADD key GT score member [score member ...]` command.
+func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ GT: true,
+ Members: members,
+ })
}
// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
-func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- }, members...)
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ NX: true,
+ Members: members,
+ })
}
// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
-func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- }, members...)
-}
-
-// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- Ch: true,
- }, members...)
-}
-
-// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// NX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- Ch: true,
- }, members...)
-}
-
-// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// XX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- Ch: true,
- }, members...)
-}
-
-// ZIncr Redis `ZADD key INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- Members: []Z{*member},
- })
-}
-
-// ZIncrNX Redis `ZADD key NX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// NX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- NX: true,
- Members: []Z{*member},
- })
-}
-
-// ZIncrXX Redis `ZADD key XX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// XX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
XX: true,
- Members: []Z{*member},
+ Members: members,
})
}
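Since ZAdd now takes redis.Z values rather than pointers, and the removed CH/INCR variants moved to ZAddArgs/ZAddArgsIncr, a migration sketch (placeholder client and data):

    _ = rdb.ZAdd(ctx, "scores", redis.Z{Score: 10, Member: "alice"}, redis.Z{Score: 7, Member: "bob"})
    // Former ZAddCh / ZIncr callers use the args form instead:
    changed, err := rdb.ZAddArgs(ctx, "scores", redis.ZAddArgs{Ch: true, Members: []redis.Z{{Score: 11, Member: "alice"}}}).Result()
    newScore, err2 := rdb.ZAddArgsIncr(ctx, "scores", redis.ZAddArgs{Members: []redis.Z{{Score: 1, Member: "bob"}}}).Result()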
@@ -2434,6 +2637,38 @@ func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
return cmd
}
+func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "zintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZMPop pops one or more elements with the highest or lowest score from the first non-empty sorted set among the provided key names.
+// direction: "max" (highest score) or "min" (lowest score), count: > 0
+// example: client.ZMPop(ctx, "max", 5, "set1", "set2")
+func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "zmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
args := make([]interface{}, 2+len(members))
args[0] = "zmscore"
@@ -2488,11 +2723,13 @@ func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSlic
// ZRangeArgs is all the options of the ZRange command.
// In version> 6.2.0, you can replace the(cmd):
-// ZREVRANGE,
-// ZRANGEBYSCORE,
-// ZREVRANGEBYSCORE,
-// ZRANGEBYLEX,
-// ZREVRANGEBYLEX.
+//
+// ZREVRANGE,
+// ZRANGEBYSCORE,
+// ZREVRANGEBYSCORE,
+// ZRANGEBYLEX,
+// ZREVRANGEBYLEX.
+//
// Please pay attention to your redis-server version.
//
// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
@@ -2655,6 +2892,14 @@ func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
return cmd
}
+// ZRankWithScore returns the rank of member along with its score. According to the Redis
+// documentation, if the member or the key does not exist, it returns a redis.Nil error.
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
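A brief sketch of the WITHSCORE form, on servers that support the ZRANK ... WITHSCORE option (placeholder client and data):

    rs, err := rdb.ZRankWithScore(ctx, "scores", "alice").Result()
    if err == redis.Nil {
        // member or key does not exist
    }
    _ = rs.Rank  // int64 rank
    _ = rs.Score // float64 score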
func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
args := make([]interface{}, 2, 2+len(members))
args[0] = "zrem"
@@ -2695,6 +2940,8 @@ func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *
return cmd
}
+// ZRevRangeWithScores: according to the Redis documentation, if the member or the key
+// does not exist, it will return a redis.Nil error.
func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
_ = c(ctx, cmd)
@@ -2745,6 +2992,12 @@ func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
return cmd
}
+func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
cmd := NewFloatCmd(ctx, "zscore", key, member)
_ = c(ctx, cmd)
@@ -2783,16 +3036,15 @@ func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *I
}
// ZRandMember redis-server version >= 6.2.0.
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "zrandmember", key, count)
- if withScores {
- args = append(args, "withscores")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
+// ZRandMemberWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores")
_ = c(ctx, cmd)
return cmd
}
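As with HRandField above, the withScores flag became a separate method; a short sketch with placeholder names:

    members, err := rdb.ZRandMember(ctx, "scores", 3).Result()           // []string
    scored, err2 := rdb.ZRandMemberWithScores(ctx, "scores", 3).Result() // []redis.Z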
@@ -2897,7 +3149,7 @@ func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
// ClientKillByFilter is new style syntax, while the ClientKill is old
//
-// CLIENT KILL