dependencies: Update go-redis from v8 to v9 (#7041)

Updated so that #7042 can use the `SetAddrs()` method added to `*redis.Ring`
in `v9`.

Part of #5545
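
A rough sketch of what this enables (hypothetical shard names and addresses; only `Ring.SetAddrs` itself comes from go-redis v9): the new method lets a running `*redis.Ring` be repointed at a changed shard set without rebuilding the client.

```go
package main

import "github.com/redis/go-redis/v9"

func main() {
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{"shard1": "10.0.0.1:6379"},
	})
	// Later, e.g. after service discovery reports a new shard set,
	// update the ring in place instead of constructing a new client.
	ring.SetAddrs(map[string]string{
		"shard1": "10.0.0.1:6379",
		"shard2": "10.0.0.2:6379",
	})
	_ = ring.Close()
}
```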
Samantha 2023-08-22 13:06:25 -04:00 committed by GitHub
parent e7f78291ba
commit 4ed54ff9c6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
71 changed files with 10781 additions and 6204 deletions

View File

@ -7,7 +7,6 @@ import (
"testing"
"time"
"github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
@ -18,6 +17,7 @@ import (
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
)

View File

@ -21,11 +21,11 @@ import (
"time"
"github.com/go-logr/stdr"
"github.com/go-redis/redis/v8"
"github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/redis/go-redis/v9"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"

3
go.mod
View File

@ -9,7 +9,6 @@ require (
github.com/aws/smithy-go v1.14.1
github.com/eggsampler/acme/v3 v3.4.0
github.com/go-logr/stdr v1.2.2
github.com/go-redis/redis/v8 v8.11.5
github.com/go-sql-driver/mysql v1.5.0
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/google/certificate-transparency-go v1.1.6
@ -24,6 +23,7 @@ require (
github.com/miekg/pkcs11 v1.1.1
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_model v0.4.0
github.com/redis/go-redis/v9 v9.1.0
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
github.com/weppos/publicsuffix-go v0.30.1-0.20230620154423-38c92ad2d5c6
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300
@ -66,6 +66,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect

12
go.sum
View File

@ -90,6 +90,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0=
github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
@ -137,6 +139,7 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -155,8 +158,6 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@ -292,10 +293,7 @@ github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1
github.com/mreiferson/go-httpclient v0.0.0-20201222173833-5e475fde3a4d/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
@ -324,6 +322,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -534,6 +534,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -755,7 +756,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -8,9 +8,9 @@ import (
bredis "github.com/letsencrypt/boulder/redis"
"github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
)
// Compile-time check that RedisSource implements the source interface.

View File

@ -9,8 +9,8 @@ import (
"github.com/letsencrypt/boulder/test"
"golang.org/x/net/context"
"github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/redis/go-redis/v9"
)
func newTestRedisSource(clk clock.FakeClock, addrs map[string]string) *RedisSource {

View File

@ -1,8 +1,8 @@
package redis
import (
"github.com/go-redis/redis/v8"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
)
// An interface satisfied by *redis.ClusterClient and also by a mock in our tests.

View File

@ -4,9 +4,9 @@ import (
"strings"
"testing"
"github.com/go-redis/redis/v8"
"github.com/letsencrypt/boulder/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
)
type mockPoolStatGetter struct{}

View File

@ -8,9 +8,9 @@ import (
"fmt"
"strings"
"github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/cmd"
@ -92,6 +92,7 @@ type RedisConfig struct {
// Default is 1 minute. -1 disables idle connections reaper,
// but idle connections are still discarded by the client
// if IdleTimeout is set.
// Deprecated: This field no longer has any effect and will be removed.
IdleCheckFrequency config.Duration `validate:"-"`
}
@ -120,12 +121,11 @@ func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*
ReadTimeout: c.ReadTimeout.Duration,
WriteTimeout: c.WriteTimeout.Duration,
PoolSize: c.PoolSize,
MinIdleConns: c.MinIdleConns,
MaxConnAge: c.MaxConnAge.Duration,
PoolTimeout: c.PoolTimeout.Duration,
IdleTimeout: c.IdleTimeout.Duration,
IdleCheckFrequency: c.IdleCheckFrequency.Duration,
PoolSize: c.PoolSize,
MinIdleConns: c.MinIdleConns,
ConnMaxLifetime: c.MaxConnAge.Duration,
PoolTimeout: c.PoolTimeout.Duration,
ConnMaxIdleTime: c.IdleTimeout.Duration,
})
return rocsp.NewWritingClient(rdb, c.Timeout.Duration, clk, stats), nil
}
@ -160,12 +160,11 @@ func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer
DialTimeout: c.DialTimeout.Duration,
ReadTimeout: c.ReadTimeout.Duration,
PoolSize: c.PoolSize,
MinIdleConns: c.MinIdleConns,
MaxConnAge: c.MaxConnAge.Duration,
PoolTimeout: c.PoolTimeout.Duration,
IdleTimeout: c.IdleTimeout.Duration,
IdleCheckFrequency: c.IdleCheckFrequency.Duration,
PoolSize: c.PoolSize,
MinIdleConns: c.MinIdleConns,
ConnMaxLifetime: c.MaxConnAge.Duration,
PoolTimeout: c.PoolTimeout.Duration,
ConnMaxIdleTime: c.IdleTimeout.Duration,
})
return rocsp.NewReadingClient(rdb, c.Timeout.Duration, clk, stats), nil
}
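
For reference, a minimal sketch (illustrative values only) of how the v8 pool options used above map onto their v9 names, per the renames in this hunk and the v9 CHANGELOG later in this diff:

```go
package main

import (
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	_ = redis.NewRing(&redis.RingOptions{
		PoolSize:        10,
		MinIdleConns:    2,
		ConnMaxLifetime: time.Hour,       // v8: MaxConnAge
		ConnMaxIdleTime: 5 * time.Minute, // v8: IdleTimeout
		// v8's IdleCheckFrequency has no v9 counterpart: the idle
		// connection reaper was removed in favor of MaxIdleConns.
	})
}
```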

View File

@ -10,9 +10,9 @@ import (
"github.com/letsencrypt/boulder/core"
bredis "github.com/letsencrypt/boulder/redis"
"github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
)

View File

@ -8,10 +8,10 @@ import (
"testing"
"time"
"github.com/go-redis/redis/v8"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/metrics"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
)

View File

@ -1,3 +0,0 @@
*.rdb
testdata/*/
.idea/

View File

@ -1,177 +0,0 @@
## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
### Bug Fixes
* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
* add whitespace to avoid unlikely collisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
### Features
* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
### Features
* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
## v8.11
- Remove OpenTelemetry metrics.
- Supports more redis commands and options.
## v8.10
- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
decision:
- Traces become smaller and less noisy.
- It may be costly to process those 3 extra spans for each query.
- go-redis no longer depends on OpenTelemetry.
Eventually we hope to replace the information that we no longer collect with OpenTelemetry
Metrics.
## v8.9
- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
`WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
## v8.8
- To make updating easier, extra modules now have the same version as go-redis does. That means that
you need to update your imports:
```
github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
```
## v8.5
- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
struct:
```go
err := rdb.HGetAll(ctx, "hash").Scan(&data)
err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
```
- Please check [redismock](https://github.com/go-redis/redismock) by
[monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
## v8
- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
using `context.Context` yet, the simplest option is to define a global package variable
`var ctx = context.TODO()` and use it when `ctx` is required (a sketch follows this list).
- Full support for `context.Context` canceling.
- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
- Added `redisext.OpenTelemetryHook` that adds
[Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
- Redis slow log support.
- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
```go
import "github.com/golang/groupcache/consistenthash"
ring := redis.NewRing(&redis.RingOptions{
NewConsistentHash: func() {
return consistenthash.New(100, crc32.ChecksumIEEE)
},
})
```
- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
- `Options.MaxRetries` default value is changed from 0 to 3.
- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
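
A minimal sketch of the context migration described in the first item above (assumed helper function; `context.TODO()` as suggested):

```go
package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

var ctx = context.TODO()

func ping(rdb *redis.Client) error {
	// Every v8 command takes a context as its first argument.
	return rdb.Ping(ctx).Err()
}
```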
## v7.3
- New option `Options.Username`, which causes the client to use `AuthACL`. Be aware of this if your
connection URL contains a username.
## v7.2
- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
## v7.1
- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
interface.
## v7
- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
transactional pipeline.
- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
- WithContext can no longer be used to create a shallow copy of the client.
- New methods ProcessContext, DoContext, and ExecContext.
- Client respects Context.Deadline when setting net.Conn deadline.
- Client listens on Context.Done while waiting for a connection from the pool and returns an error
when the context is cancelled.
- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
detecting reconnections.
- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
the time.
- `SetLimiter` is removed and added `Options.Limiter` instead.
- `HMSet` is deprecated as of Redis v4.
## v6.15
- Cluster and Ring pipelines process commands for each node in its own goroutine.
## 6.14
- Added Options.MinIdleConns.
- Added Options.MaxConnAge.
- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
- Add Client.Do to simplify creating custom commands.
- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
- Lower memory usage.
## v6.13
- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
`HashReplicas = 1000` for better key distribution between shards.
- Cluster client was optimized to use much less memory when reloading cluster state.
- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a
timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
- Dialer.KeepAlive is set to 5 minutes by default.
## v6.12
- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal
Redis servers that don't have cluster mode enabled. See
https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup

View File

@ -1,35 +0,0 @@
PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
test: testdeps
go test ./...
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
go vet
testdeps: testdata/redis/src/redis-server
bench: testdeps
go test ./... -test.run=NONE -test.bench=. -test.benchmem
.PHONY: all test testdeps bench
testdata/redis:
mkdir -p $@
wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
testdata/redis/src/redis-server: testdata/redis
cd $< && make all
fmt:
gofmt -w -s ./
goimports -w -local github.com/go-redis/redis ./
go_mod_tidy:
go get -u && go mod tidy
set -e; for dir in $(PACKAGE_DIRS); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go get -u && \
go mod tidy); \
done

View File

@ -1,175 +0,0 @@
# Redis client for Go
![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
OpenTelemetry and ClickHouse. Give it a star as well!
## Resources
- [Discussions](https://github.com/go-redis/redis/discussions)
- [Documentation](https://redis.uptrace.dev)
- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
Other projects you may like:
- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
## Ecosystem
- [Redis Mock](https://github.com/go-redis/redismock)
- [Distributed Locks](https://github.com/bsm/redislock)
- [Redis Cache](https://github.com/go-redis/cache)
- [Rate limiting](https://github.com/go-redis/redis_rate)
## Features
- Redis 3 commands except QUIT, MONITOR, and SYNC.
- Automatic connection pooling with
[circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
[TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
without using cluster mode and Redis Sentinel.
- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
## Installation
go-redis supports the two most recent Go versions and requires a Go version with
[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
module:
```shell
go mod init github.com/my/repo
```
And then install go-redis/v8 (note _v8_ in the import; omitting it is a common mistake):
```shell
go get github.com/go-redis/redis/v8
```
## Quickstart
```go
import (
"context"
"github.com/go-redis/redis/v8"
"fmt"
)
var ctx = context.Background()
func ExampleClient() {
rdb := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
err := rdb.Set(ctx, "key", "value", 0).Err()
if err != nil {
panic(err)
}
val, err := rdb.Get(ctx, "key").Result()
if err != nil {
panic(err)
}
fmt.Println("key", val)
val2, err := rdb.Get(ctx, "key2").Result()
if err == redis.Nil {
fmt.Println("key2 does not exist")
} else if err != nil {
panic(err)
} else {
fmt.Println("key2", val2)
}
// Output: key value
// key2 does not exist
}
```
## Look and feel
Some corner cases:
```go
// SET key value EX 10 NX
set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
// SET key value keepttl NX
set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
// SORT list LIMIT 0 2 ASC
vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
Min: "-inf",
Max: "+inf",
Offset: 0,
Count: 2,
}).Result()
// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
Keys: []string{"zset1", "zset2"},
Weights: []float64{2, 3},
}).Result()
// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
// custom command
res, err := rdb.Do(ctx, "set", "key", "value").Result()
```
## Run the test
go-redis will start a redis-server and run the test cases.
The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
```
var (
redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
)
```
For local testing, you can change the variables to refer to your local files, or create a soft link
to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
```
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
```
Lastly, run:
```
go test
```
## Contributors
Thanks to all the people who already contributed!
<a href="https://github.com/go-redis/redis/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
</a>

File diff suppressed because it is too large

View File

@ -1,332 +0,0 @@
package proto
import (
"bufio"
"fmt"
"io"
"github.com/go-redis/redis/v8/internal/util"
)
// redis resp protocol data type.
const (
ErrorReply = '-'
StatusReply = '+'
IntReply = ':'
StringReply = '$'
ArrayReply = '*'
)
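// Illustrative RESP2 framings for the markers above (examples added for
// clarity, not from the original source):
//   "+OK\r\n"          - StatusReply
//   "-ERR unknown\r\n" - ErrorReply
//   ":42\r\n"          - IntReply
//   "$5\r\nhello\r\n"  - StringReply (bulk string; "$-1\r\n" is a nil reply)
//   "*2\r\n..."        - ArrayReply ("*-1\r\n" is a nil reply)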
//------------------------------------------------------------------------------
const Nil = RedisError("redis: nil") // nolint:errname
type RedisError string
func (e RedisError) Error() string { return string(e) }
func (RedisError) RedisError() {}
//------------------------------------------------------------------------------
type MultiBulkParse func(*Reader, int64) (interface{}, error)
type Reader struct {
rd *bufio.Reader
_buf []byte
}
func NewReader(rd io.Reader) *Reader {
return &Reader{
rd: bufio.NewReader(rd),
_buf: make([]byte, 64),
}
}
func (r *Reader) Buffered() int {
return r.rd.Buffered()
}
func (r *Reader) Peek(n int) ([]byte, error) {
return r.rd.Peek(n)
}
func (r *Reader) Reset(rd io.Reader) {
r.rd.Reset(rd)
}
func (r *Reader) ReadLine() ([]byte, error) {
line, err := r.readLine()
if err != nil {
return nil, err
}
if isNilReply(line) {
return nil, Nil
}
return line, nil
}
// readLine reads a line, returning an error if:
// - there is a pending read error;
// - or line does not end with \r\n.
func (r *Reader) readLine() ([]byte, error) {
b, err := r.rd.ReadSlice('\n')
if err != nil {
if err != bufio.ErrBufferFull {
return nil, err
}
full := make([]byte, len(b))
copy(full, b)
b, err = r.rd.ReadBytes('\n')
if err != nil {
return nil, err
}
full = append(full, b...) //nolint:makezero
b = full
}
if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
return nil, fmt.Errorf("redis: invalid reply: %q", b)
}
return b[:len(b)-2], nil
}
func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case ErrorReply:
return nil, ParseErrorReply(line)
case StatusReply:
return string(line[1:]), nil
case IntReply:
return util.ParseInt(line[1:], 10, 64)
case StringReply:
return r.readStringReply(line)
case ArrayReply:
n, err := parseArrayLen(line)
if err != nil {
return nil, err
}
if m == nil {
err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
return nil, err
}
return m(r, n)
}
return nil, fmt.Errorf("redis: can't parse %.100q", line)
}
func (r *Reader) ReadIntReply() (int64, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case ErrorReply:
return 0, ParseErrorReply(line)
case IntReply:
return util.ParseInt(line[1:], 10, 64)
default:
return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
}
}
func (r *Reader) ReadString() (string, error) {
line, err := r.ReadLine()
if err != nil {
return "", err
}
switch line[0] {
case ErrorReply:
return "", ParseErrorReply(line)
case StringReply:
return r.readStringReply(line)
case StatusReply:
return string(line[1:]), nil
case IntReply:
return string(line[1:]), nil
default:
return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
}
}
func (r *Reader) readStringReply(line []byte) (string, error) {
if isNilReply(line) {
return "", Nil
}
replyLen, err := util.Atoi(line[1:])
if err != nil {
return "", err
}
b := make([]byte, replyLen+2)
_, err = io.ReadFull(r.rd, b)
if err != nil {
return "", err
}
return util.BytesToString(b[:replyLen]), nil
}
func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case ErrorReply:
return nil, ParseErrorReply(line)
case ArrayReply:
n, err := parseArrayLen(line)
if err != nil {
return nil, err
}
return m(r, n)
default:
return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
}
}
func (r *Reader) ReadArrayLen() (int, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case ErrorReply:
return 0, ParseErrorReply(line)
case ArrayReply:
n, err := parseArrayLen(line)
if err != nil {
return 0, err
}
return int(n), nil
default:
return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
}
}
func (r *Reader) ReadScanReply() ([]string, uint64, error) {
n, err := r.ReadArrayLen()
if err != nil {
return nil, 0, err
}
if n != 2 {
return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
}
cursor, err := r.ReadUint()
if err != nil {
return nil, 0, err
}
n, err = r.ReadArrayLen()
if err != nil {
return nil, 0, err
}
keys := make([]string, n)
for i := 0; i < n; i++ {
key, err := r.ReadString()
if err != nil {
return nil, 0, err
}
keys[i] = key
}
return keys, cursor, err
}
func (r *Reader) ReadInt() (int64, error) {
b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseInt(b, 10, 64)
}
func (r *Reader) ReadUint() (uint64, error) {
b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseUint(b, 10, 64)
}
func (r *Reader) ReadFloatReply() (float64, error) {
b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseFloat(b, 64)
}
func (r *Reader) readTmpBytesReply() ([]byte, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case ErrorReply:
return nil, ParseErrorReply(line)
case StringReply:
return r._readTmpBytesReply(line)
case StatusReply:
return line[1:], nil
default:
return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
}
}
func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
if isNilReply(line) {
return nil, Nil
}
replyLen, err := util.Atoi(line[1:])
if err != nil {
return nil, err
}
buf := r.buf(replyLen + 2)
_, err = io.ReadFull(r.rd, buf)
if err != nil {
return nil, err
}
return buf[:replyLen], nil
}
func (r *Reader) buf(n int) []byte {
if n <= cap(r._buf) {
return r._buf[:n]
}
d := n - cap(r._buf)
r._buf = append(r._buf, make([]byte, d)...)
return r._buf
}
func isNilReply(b []byte) bool {
return len(b) == 3 &&
(b[0] == StringReply || b[0] == ArrayReply) &&
b[1] == '-' && b[2] == '1'
}
func ParseErrorReply(line []byte) error {
return RedisError(string(line[1:]))
}
func parseArrayLen(line []byte) (int64, error) {
if isNilReply(line) {
return 0, Nil
}
return util.ParseInt(line[1:], 10, 64)
}

View File

@ -1,12 +0,0 @@
//go:build appengine
// +build appengine
package internal
func String(b []byte) string {
return string(b)
}
func Bytes(s string) []byte {
return []byte(s)
}

View File

@ -1,21 +0,0 @@
//go:build !appengine
// +build !appengine
package internal
import "unsafe"
// String converts byte slice to string.
func String(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// Bytes converts string to byte slice.
func Bytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(
&struct {
string
Cap int
}{s, len(s)},
))
}

View File

@ -1,773 +0,0 @@
package redis
import (
"context"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
)
// Nil reply returned by Redis when key does not exist.
const Nil = proto.Nil
func SetLogger(logger internal.Logging) {
internal.Logger = logger
}
//------------------------------------------------------------------------------
type Hook interface {
BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
AfterProcess(ctx context.Context, cmd Cmder) error
BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
}
type hooks struct {
hooks []Hook
}
func (hs *hooks) lock() {
hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
}
func (hs hooks) clone() hooks {
clone := hs
clone.lock()
return clone
}
func (hs *hooks) AddHook(hook Hook) {
hs.hooks = append(hs.hooks, hook)
}
func (hs hooks) process(
ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
if len(hs.hooks) == 0 {
err := fn(ctx, cmd)
cmd.SetErr(err)
return err
}
var hookIndex int
var retErr error
for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
if retErr != nil {
cmd.SetErr(retErr)
}
}
if retErr == nil {
retErr = fn(ctx, cmd)
cmd.SetErr(retErr)
}
for hookIndex--; hookIndex >= 0; hookIndex-- {
if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
retErr = err
cmd.SetErr(retErr)
}
}
return retErr
}
func (hs hooks) processPipeline(
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
if len(hs.hooks) == 0 {
err := fn(ctx, cmds)
return err
}
var hookIndex int
var retErr error
for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
if retErr != nil {
setCmdsErr(cmds, retErr)
}
}
if retErr == nil {
retErr = fn(ctx, cmds)
}
for hookIndex--; hookIndex >= 0; hookIndex-- {
if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
retErr = err
setCmdsErr(cmds, retErr)
}
}
return retErr
}
func (hs hooks) processTxPipeline(
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
cmds = wrapMultiExec(ctx, cmds)
return hs.processPipeline(ctx, cmds, fn)
}
//------------------------------------------------------------------------------
type baseClient struct {
opt *Options
connPool pool.Pooler
onClose func() error // hook called when client is closed
}
func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
return &baseClient{
opt: opt,
connPool: connPool,
}
}
func (c *baseClient) clone() *baseClient {
clone := *c
return &clone
}
func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
opt := c.opt.clone()
opt.ReadTimeout = timeout
opt.WriteTimeout = timeout
clone := c.clone()
clone.opt = opt
return clone
}
func (c *baseClient) String() string {
return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
}
func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
cn, err := c.connPool.NewConn(ctx)
if err != nil {
return nil, err
}
err = c.initConn(ctx, cn)
if err != nil {
_ = c.connPool.CloseConn(cn)
return nil, err
}
return cn, nil
}
func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
if c.opt.Limiter != nil {
err := c.opt.Limiter.Allow()
if err != nil {
return nil, err
}
}
cn, err := c._getConn(ctx)
if err != nil {
if c.opt.Limiter != nil {
c.opt.Limiter.ReportResult(err)
}
return nil, err
}
return cn, nil
}
func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
cn, err := c.connPool.Get(ctx)
if err != nil {
return nil, err
}
if cn.Inited {
return cn, nil
}
if err := c.initConn(ctx, cn); err != nil {
c.connPool.Remove(ctx, cn, err)
if err := errors.Unwrap(err); err != nil {
return nil, err
}
return nil, err
}
return cn, nil
}
func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
if cn.Inited {
return nil
}
cn.Inited = true
if c.opt.Password == "" &&
c.opt.DB == 0 &&
!c.opt.readOnly &&
c.opt.OnConnect == nil {
return nil
}
connPool := pool.NewSingleConnPool(c.connPool, cn)
conn := newConn(ctx, c.opt, connPool)
_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
if c.opt.Password != "" {
if c.opt.Username != "" {
pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
} else {
pipe.Auth(ctx, c.opt.Password)
}
}
if c.opt.DB > 0 {
pipe.Select(ctx, c.opt.DB)
}
if c.opt.readOnly {
pipe.ReadOnly(ctx)
}
return nil
})
if err != nil {
return err
}
if c.opt.OnConnect != nil {
return c.opt.OnConnect(ctx, conn)
}
return nil
}
func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
if c.opt.Limiter != nil {
c.opt.Limiter.ReportResult(err)
}
if isBadConn(err, false, c.opt.Addr) {
c.connPool.Remove(ctx, cn, err)
} else {
c.connPool.Put(ctx, cn)
}
}
func (c *baseClient) withConn(
ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
cn, err := c.getConn(ctx)
if err != nil {
return err
}
defer func() {
c.releaseConn(ctx, cn, err)
}()
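// Note: the goroutine-based cancellation below is the v8 "asynchronous
// cancellation based on the context timeout" that the v9 CHANGELOG later in
// this diff describes as racy and removed in v9.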
done := ctx.Done() //nolint:ifshort
if done == nil {
err = fn(ctx, cn)
return err
}
errc := make(chan error, 1)
go func() { errc <- fn(ctx, cn) }()
select {
case <-done:
_ = cn.Close()
// Wait for the goroutine to finish and send something.
<-errc
err = ctx.Err()
return err
case err = <-errc:
return err
}
}
func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
var lastErr error
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
attempt := attempt
retry, err := c._process(ctx, cmd, attempt)
if err == nil || !retry {
return err
}
lastErr = err
}
return lastErr
}
func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
if attempt > 0 {
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
return false, err
}
}
retryTimeout := uint32(1)
err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
if err != nil {
return err
}
err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
if err != nil {
if cmd.readTimeout() == nil {
atomic.StoreUint32(&retryTimeout, 1)
}
return err
}
return nil
})
if err == nil {
return false, nil
}
retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
return retry, err
}
func (c *baseClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
if timeout := cmd.readTimeout(); timeout != nil {
t := *timeout
if t == 0 {
return 0
}
return t + 10*time.Second
}
return c.opt.ReadTimeout
}
// Close closes the client, releasing any open resources.
//
// It is rare to Close a Client, as the Client is meant to be
// long-lived and shared between many goroutines.
func (c *baseClient) Close() error {
var firstErr error
if c.onClose != nil {
if err := c.onClose(); err != nil {
firstErr = err
}
}
if err := c.connPool.Close(); err != nil && firstErr == nil {
firstErr = err
}
return firstErr
}
func (c *baseClient) getAddr() string {
return c.opt.Addr
}
func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
}
func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
}
type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
func (c *baseClient) generalProcessPipeline(
ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
err := c._generalProcessPipeline(ctx, cmds, p)
if err != nil {
setCmdsErr(cmds, err)
return err
}
return cmdsFirstErr(cmds)
}
func (c *baseClient) _generalProcessPipeline(
ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
var lastErr error
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
return err
}
}
var canRetry bool
lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
var err error
canRetry, err = p(ctx, cn, cmds)
return err
})
if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
return lastErr
}
}
return lastErr
}
func (c *baseClient) pipelineProcessCmds(
ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
})
if err != nil {
return true, err
}
err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
return pipelineReadCmds(rd, cmds)
})
return true, err
}
func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
for _, cmd := range cmds {
err := cmd.readReply(rd)
cmd.SetErr(err)
if err != nil && !isRedisError(err) {
return err
}
}
return nil
}
func (c *baseClient) txPipelineProcessCmds(
ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
})
if err != nil {
return true, err
}
err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
statusCmd := cmds[0].(*StatusCmd)
// Trim multi and exec.
cmds = cmds[1 : len(cmds)-1]
err := txPipelineReadQueued(rd, statusCmd, cmds)
if err != nil {
return err
}
return pipelineReadCmds(rd, cmds)
})
return false, err
}
func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
if len(cmds) == 0 {
panic("not reached")
}
cmdCopy := make([]Cmder, len(cmds)+2)
cmdCopy[0] = NewStatusCmd(ctx, "multi")
copy(cmdCopy[1:], cmds)
cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
return cmdCopy
}
func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
// Parse queued replies.
if err := statusCmd.readReply(rd); err != nil {
return err
}
for range cmds {
if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
return err
}
}
// Parse number of replies.
line, err := rd.ReadLine()
if err != nil {
if err == Nil {
err = TxFailedErr
}
return err
}
switch line[0] {
case proto.ErrorReply:
return proto.ParseErrorReply(line)
case proto.ArrayReply:
// ok
default:
err := fmt.Errorf("redis: expected '*', but got line %q", line)
return err
}
return nil
}
//------------------------------------------------------------------------------
// Client is a Redis client representing a pool of zero or more
// underlying connections. It's safe for concurrent use by multiple
// goroutines.
type Client struct {
*baseClient
cmdable
hooks
ctx context.Context
}
// NewClient returns a client to the Redis Server specified by Options.
func NewClient(opt *Options) *Client {
opt.init()
c := Client{
baseClient: newBaseClient(opt, newConnPool(opt)),
ctx: context.Background(),
}
c.cmdable = c.Process
return &c
}
func (c *Client) clone() *Client {
clone := *c
clone.cmdable = clone.Process
clone.hooks.lock()
return &clone
}
func (c *Client) WithTimeout(timeout time.Duration) *Client {
clone := c.clone()
clone.baseClient = c.baseClient.withTimeout(timeout)
return clone
}
func (c *Client) Context() context.Context {
return c.ctx
}
func (c *Client) WithContext(ctx context.Context) *Client {
if ctx == nil {
panic("nil context")
}
clone := c.clone()
clone.ctx = ctx
return clone
}
func (c *Client) Conn(ctx context.Context) *Conn {
return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
}
// Do creates a Cmd from the args and processes the cmd.
func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
return cmd
}
func (c *Client) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
}
func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}
func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
// Options returns read-only Options that were used to create the client.
func (c *Client) Options() *Options {
return c.opt
}
type PoolStats pool.Stats
// PoolStats returns connection pool stats.
func (c *Client) PoolStats() *PoolStats {
stats := c.connPool.Stats()
return (*PoolStats)(stats)
}
func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Client) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
}
pipe.init()
return &pipe
}
func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Client) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
}
pipe.init()
return &pipe
}
func (c *Client) pubSub() *PubSub {
pubsub := &PubSub{
opt: c.opt,
newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
return c.newConn(ctx)
},
closeConn: c.connPool.CloseConn,
}
pubsub.init()
return pubsub
}
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
// Note that this method does not wait on a response from Redis, so the
// subscription may not be active immediately. To force the connection to wait,
// you may call the Receive() method on the returned *PubSub like so:
//
// sub := client.Subscribe(queryResp)
// iface, err := sub.Receive()
// if err != nil {
// // handle error
// }
//
// // Should be *Subscription, but others are possible if other actions have been
// // taken on sub since it was created.
// switch iface.(type) {
// case *Subscription:
// // subscribe succeeded
// case *Message:
// // received first message
// case *Pong:
// // pong received
// default:
// // handle error
// }
//
// ch := sub.Channel()
func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.Subscribe(ctx, channels...)
}
return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create empty subscription.
func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.PSubscribe(ctx, channels...)
}
return pubsub
}
//------------------------------------------------------------------------------
type conn struct {
baseClient
cmdable
statefulCmdable
hooks // TODO: inherit hooks
}
// Conn represents a single Redis connection rather than a pool of connections.
// Prefer running commands from Client unless there is a specific need
// for a continuous single Redis connection.
type Conn struct {
*conn
ctx context.Context
}
func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
c := Conn{
conn: &conn{
baseClient: baseClient{
opt: opt,
connPool: connPool,
},
},
ctx: ctx,
}
c.cmdable = c.Process
c.statefulCmdable = c.Process
return &c
}
func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
}
func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}
func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Conn) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
}
pipe.init()
return &pipe
}
func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Conn) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
}
pipe.init()
return &pipe
}

4
vendor/github.com/redis/go-redis/v9/.gitignore generated vendored Normal file
View File

@ -0,0 +1,4 @@
*.rdb
testdata/*
.idea/
.DS_Store

124
vendor/github.com/redis/go-redis/v9/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,124 @@
## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
### Features
* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
### Bug Fixes
* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
### Features
* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
### New Features
- feat(scan): scan time.Time sets the default decoding (#2413)
- Add support for CLUSTER LINKS command (#2504)
- Add support for acl dryrun command (#2502)
- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
- Add support for LCS Command (#2480)
- Add support for BZMPOP (#2456)
- Adding support for ZMPOP command (#2408)
- Add support for LMPOP (#2440)
- feat: remove pool unused fields (#2438)
- Expiretime and PExpireTime (#2426)
- Implement `FUNCTION` group of commands (#2475)
- feat(zadd): add ZAddLT and ZAddGT (#2429)
- Add: Support for COMMAND LIST command (#2491)
- Add support for BLMPOP (#2442)
- feat: check pipeline.Do to prevent confusion with Exec (#2517)
- Function stats, function kill, fcall and fcall_ro (#2486)
- feat: Add support for CLUSTER SHARDS command (#2507)
- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
### Fixed
- fix: eval api cmd.SetFirstKeyPos (#2501)
- fix: limit the number of connections created (#2441)
- fixed #2462: v9 continues to support Dragonfly, whose Hello command returns a "NOAUTH Authentication required" error (#2479)
- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
- fix: group lag can be null (#2448)
### Maintenance
- Updating to the latest version of redis (#2508)
- Allowing for running tests on a port other than the fixed 6380 (#2466)
- redis 7.0.8 in tests (#2450)
- docs: Update redisotel example for v9 (#2425)
- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
- chore: add Chinese translation (#2436)
- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
- docs: add instructions for the HSet api (#2503)
- docs: add reading lag field comment (#2451)
- test: update go mod before testing(go mod tidy) (#2423)
- docs: fix comment typo (#2505)
- test: remove testify (#2463)
- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
- fix(appendArg): appendArg case special type (#2489)
## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
### Features
* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
## v9 2023-01-30
### Breaking
- Changed Pipelines to not be thread-safe any more.
### Added
- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
contributed by @monkey92t who has done the majority of work in this release.
- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
and deadlines. See
[Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
`redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See
[documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
- Added `redis.HasErrorPrefix` to help working with errors.
### Changed
- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
completely gone in v9.
- Reworked hook interface and added `DialHook` (a sketch follows this list).
- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
[example](example/otel) and
[documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
an allocation.
- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
- Removed connection reaper in favor of `MaxIdleConns`.
- Removed `WithContext` since `context.Context` can be passed directly as an arg.
- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
reset commands for some reason.
### Fixed
- Improved and fixed pipeline retries.
- As usual, added support for more commands and fixed some bugs.
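
A minimal sketch of the reworked hook shape mentioned above (hypothetical `loggingHook`; the wrap-style methods replace v8's Before/After pairs):

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/redis/go-redis/v9"
)

type loggingHook struct{}

func (loggingHook) DialHook(next redis.DialHook) redis.DialHook {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		log.Printf("dialing %s %s", network, addr)
		return next(ctx, network, addr)
	}
}

func (loggingHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
	return func(ctx context.Context, cmd redis.Cmder) error {
		log.Printf("command: %s", cmd.Name())
		return next(ctx, cmd)
	}
}

func (loggingHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
	return func(ctx context.Context, cmds []redis.Cmder) error {
		return next(ctx, cmds)
	}
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	rdb.AddHook(loggingHook{}) // each hook wraps the next stage in the chain
}
```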

View File

@ -1,4 +1,4 @@
Copyright (c) 2013 The github.com/go-redis/redis Authors.
Copyright (c) 2013 The github.com/redis/go-redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without

41
vendor/github.com/redis/go-redis/v9/Makefile generated vendored Normal file
View File

@ -0,0 +1,41 @@
GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
test: testdeps
set -e; for dir in $(GO_MOD_DIRS); do \
echo "go test in $${dir}"; \
(cd "$${dir}" && \
go mod tidy -compat=1.18 && \
go test && \
go test ./... -short -race && \
go test ./... -run=NONE -bench=. -benchmem && \
env GOOS=linux GOARCH=386 go test && \
go vet); \
done
cd internal/customvet && go build .
go vet -vettool ./internal/customvet/customvet
testdeps: testdata/redis/src/redis-server
bench: testdeps
go test ./... -test.run=NONE -test.bench=. -test.benchmem
.PHONY: all test testdeps bench
testdata/redis:
mkdir -p $@
wget -qO- https://download.redis.io/releases/redis-7.2-rc3.tar.gz | tar xvz --strip-components=1 -C $@
testdata/redis/src/redis-server: testdata/redis
cd $< && make all
fmt:
gofmt -w -s ./
goimports -w -local github.com/redis/go-redis ./
go_mod_tidy:
set -e; for dir in $(GO_MOD_DIRS); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go get -u ./... && \
go mod tidy -compat=1.18); \
done

224
vendor/github.com/redis/go-redis/v9/README.md generated vendored Normal file

@ -0,0 +1,224 @@
# Redis client for Go
[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
> use it to monitor applications and set up automatic alerts to receive notifications via email,
> Slack, Telegram, and others.
>
> See the [OpenTelemetry](example/otel) example, which demonstrates how you can use Uptrace to
> monitor go-redis.
## Documentation
- [English](https://redis.uptrace.dev)
- [简体中文](https://redis.uptrace.dev/zh/)
## Resources
- [Discussions](https://github.com/redis/go-redis/discussions)
- [Chat](https://discord.gg/rWtp5Aj)
- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
## Ecosystem
- [Redis Mock](https://github.com/go-redis/redismock)
- [Distributed Locks](https://github.com/bsm/redislock)
- [Redis Cache](https://github.com/go-redis/cache)
- [Rate limiting](https://github.com/go-redis/redis_rate)
This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
key-value NoSQL database that uses RocksDB as its storage engine and is compatible with the Redis protocol.
## Features
- Redis 3 commands except QUIT, MONITOR, and SYNC.
- Automatic connection pooling.
- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
- [Redis Probabilistic (RedisStack)](https://redis.io/docs/data-types/probabilistic/)
## Installation
go-redis supports the two latest Go versions and requires a Go version with
[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
module:
```shell
go mod init github.com/my/repo
```
Then install go-redis/**v9**:
```shell
go get github.com/redis/go-redis/v9
```
## Quickstart
```go
import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)
var ctx = context.Background()
func ExampleClient() {
rdb := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
err := rdb.Set(ctx, "key", "value", 0).Err()
if err != nil {
panic(err)
}
val, err := rdb.Get(ctx, "key").Result()
if err != nil {
panic(err)
}
fmt.Println("key", val)
val2, err := rdb.Get(ctx, "key2").Result()
if err == redis.Nil {
fmt.Println("key2 does not exist")
} else if err != nil {
panic(err)
} else {
fmt.Println("key2", val2)
}
// Output: key value
// key2 does not exist
}
```
The example above can be modified to specify the RESP protocol version by setting the `Protocol` field on the `Options` struct:
```go
rdb := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
})
```
### Connecting via a Redis URL
go-redis also supports connecting via the [Redis URI specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt). The example below demonstrates how a connection can be configured from a string that adheres to this specification.
```go
import (
	"context"

	"github.com/redis/go-redis/v9"
)
var ctx = context.Background()
func ExampleClient() {
url := "redis://localhost:6379?password=hello&protocol=3"
opts, err := redis.ParseURL(url)
if err != nil {
panic(err)
}
rdb := redis.NewClient(opts)
```
## Look and feel
Some corner cases:
```go
// SET key value EX 10 NX
set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
// SET key value keepttl NX
set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
// SORT list LIMIT 0 2 ASC
vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
Min: "-inf",
Max: "+inf",
Offset: 0,
Count: 2,
}).Result()
// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
Keys: []string{"zset1", "zset2"},
	Weights: []int64{2, 3},
}).Result()
// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
// custom command
res, err := rdb.Do(ctx, "set", "key", "value").Result()
```
## Run the test
go-redis will start a redis-server and run the test cases.
The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
```go
var (
redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
)
```
For local testing, you can change the variables to refer to your local files, or create a soft link
to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
```shell
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
```
Lastly, run:
```shell
go test
```
Another option is to run specific tests against an already running Redis instance. The example below tests against a Redis server running on port 9999:
```shell
REDIS_PORT=9999 go test <your options>
```
## See also
- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
- [Golang HTTP router](https://bunrouter.uptrace.dev/)
- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
## Contributors
Thanks to all the people who already contributed!
<a href="https://github.com/redis/go-redis/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=redis/go-redis" />
</a>


@ -6,17 +6,19 @@ import (
"fmt"
"math"
"net"
"net/url"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/hashtag"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/go-redis/redis/v8/internal/rand"
"github.com/redis/go-redis/v9/internal"
"github.com/redis/go-redis/v9/internal/hashtag"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/proto"
"github.com/redis/go-redis/v9/internal/rand"
)
var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
@ -27,6 +29,9 @@ type ClusterOptions struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName string
// NewClient creates a cluster node client with provided name and options.
NewClient func(opt *Options) *Client
@ -57,6 +62,7 @@ type ClusterOptions struct {
OnConnect func(ctx context.Context, cn *Conn) error
Protocol int
Username string
Password string
@ -64,20 +70,18 @@ type ClusterOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
// PoolSize applies per cluster node and not for the whole cluster.
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
PoolFIFO bool
PoolSize int // applies per cluster node and not for the whole cluster
PoolTimeout time.Duration
MinIdleConns int
MaxIdleConns int
ConnMaxIdleTime time.Duration
ConnMaxLifetime time.Duration
TLSConfig *tls.Config
}
@ -131,13 +135,137 @@ func (opt *ClusterOptions) init() {
}
}
// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
// The URL must be in the form:
//
// redis://<user>:<password>@<host>:<port>
// or
// rediss://<user>:<password>@<host>:<port>
//
// To add additional addresses, specify the query parameter "addr" one or more times, e.g.:
//
// redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
// or
// rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
//
// Most Option fields can be set using query parameters, with the following restrictions:
// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
// - only scalar type fields are supported (bool, int, time.Duration)
// - for time.Duration fields, values must be a valid input for time.ParseDuration();
// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
// - to disable a duration field, use value less than or equal to 0; to use the default
// value, leave the value blank or remove the parameter
// - only the last value is interpreted if a parameter is given multiple times
// - fields "network", "addr", "username" and "password" can only be set using other
// URL attributes (scheme, host, userinfo, respectively); query parameters using these
// names will be treated as unknown parameters
// - unknown parameter names will result in an error
//
// Example:
//
// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
// is equivalent to:
// &ClusterOptions{
// Addrs: ["localhost:6789", "localhost:6790", "localhost:6791"]
// DialTimeout: 3 * time.Second, // no time unit = seconds
// ReadTimeout: 6 * time.Second,
// }
func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
o := &ClusterOptions{}
u, err := url.Parse(redisURL)
if err != nil {
return nil, err
}
// add base URL to the array of addresses
// more addresses may be added through the URL params
h, p := getHostPortWithDefaults(u)
o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
// setup username, password, and other configurations
o, err = setupClusterConn(u, h, o)
if err != nil {
return nil, err
}
return o, nil
}
// setupClusterConn gets the username and password from the URL and the query parameters.
func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
switch u.Scheme {
case "rediss":
o.TLSConfig = &tls.Config{ServerName: host}
fallthrough
case "redis":
o.Username, o.Password = getUserPassword(u)
default:
return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
}
// retrieve the configuration from the query parameters
o, err := setupClusterQueryParams(u, o)
if err != nil {
return nil, err
}
return o, nil
}
// setupClusterQueryParams converts query parameters in u to option value in o.
func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
q := queryOptions{q: u.Query()}
o.Protocol = q.int("protocol")
o.ClientName = q.string("client_name")
o.MaxRedirects = q.int("max_redirects")
o.ReadOnly = q.bool("read_only")
o.RouteByLatency = q.bool("route_by_latency")
o.RouteRandomly = q.bool("route_randomly")
o.MaxRetries = q.int("max_retries")
o.MinRetryBackoff = q.duration("min_retry_backoff")
o.MaxRetryBackoff = q.duration("max_retry_backoff")
o.DialTimeout = q.duration("dial_timeout")
o.ReadTimeout = q.duration("read_timeout")
o.WriteTimeout = q.duration("write_timeout")
o.PoolFIFO = q.bool("pool_fifo")
o.PoolSize = q.int("pool_size")
o.MinIdleConns = q.int("min_idle_conns")
o.PoolTimeout = q.duration("pool_timeout")
o.ConnMaxLifetime = q.duration("conn_max_lifetime")
o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
if q.err != nil {
return nil, q.err
}
// addr can be specified as many times as needed
addrs := q.strings("addr")
for _, addr := range addrs {
h, p, err := net.SplitHostPort(addr)
if err != nil || h == "" || p == "" {
return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
}
o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
}
// any parameters left?
if r := q.remaining(); len(r) > 0 {
return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
}
return o, nil
}
func (opt *ClusterOptions) clientOptions() *Options {
const disableIdleCheck = -1
return &Options{
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@ -149,13 +277,13 @@ func (opt *ClusterOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: disableIdleCheck,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
MinIdleConns: opt.MinIdleConns,
MaxIdleConns: opt.MaxIdleConns,
ConnMaxIdleTime: opt.ConnMaxIdleTime,
ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
// If ClusterSlots is populated, then we probably have an artificial
@ -204,15 +332,26 @@ func (n *clusterNode) updateLatency() {
const numProbe = 10
var dur uint64
successes := 0
for i := 0; i < numProbe; i++ {
time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
start := time.Now()
n.Client.Ping(context.TODO())
dur += uint64(time.Since(start) / time.Microsecond)
err := n.Client.Ping(context.TODO()).Err()
if err == nil {
dur += uint64(time.Since(start) / time.Microsecond)
successes++
}
}
latency := float64(dur) / float64(numProbe)
var latency float64
if successes == 0 {
// If none of the pings worked, set latency to some arbitrarily high value so this node gets
// least priority.
latency = float64((1 * time.Minute) / time.Microsecond)
} else {
latency = float64(dur) / float64(successes)
}
atomic.StoreUint32(&n.latency, uint32(latency+0.5))
}
@ -262,6 +401,7 @@ type clusterNodes struct {
nodes map[string]*clusterNode
activeAddrs []string
closed bool
onNewNode []func(rdb *Client)
_generation uint32 // atomic
}
@ -297,6 +437,12 @@ func (c *clusterNodes) Close() error {
return firstErr
}
func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
c.mu.Lock()
c.onNewNode = append(c.onNewNode, fn)
c.mu.Unlock()
}
func (c *clusterNodes) Addrs() ([]string, error) {
var addrs []string
@ -374,6 +520,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
}
node = newClusterNode(c.opt, addr)
for _, fn := range c.onNewNode {
fn(node.Client)
}
c.addrs = appendIfNotExists(c.addrs, addr)
c.nodes[addr] = node
@ -683,21 +832,16 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er
//------------------------------------------------------------------------------
type clusterClient struct {
opt *ClusterOptions
nodes *clusterNodes
state *clusterStateHolder //nolint:structcheck
cmdsInfoCache *cmdsInfoCache //nolint:structcheck
}
// ClusterClient is a Redis Cluster client representing a pool of zero
// or more underlying connections. It's safe for concurrent use by
// multiple goroutines.
type ClusterClient struct {
*clusterClient
opt *ClusterOptions
nodes *clusterNodes
state *clusterStateHolder
cmdsInfoCache *cmdsInfoCache
cmdable
hooks
ctx context.Context
hooksMixin
}
// NewClusterClient returns a Redis Cluster client as described in
@ -706,38 +850,24 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
opt.init()
c := &ClusterClient{
clusterClient: &clusterClient{
opt: opt,
nodes: newClusterNodes(opt),
},
ctx: context.Background(),
opt: opt,
nodes: newClusterNodes(opt),
}
c.state = newClusterStateHolder(c.loadState)
c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
c.cmdable = c.Process
if opt.IdleCheckFrequency > 0 {
go c.reaper(opt.IdleCheckFrequency)
}
c.initHooks(hooks{
dial: nil,
process: c.process,
pipeline: c.processPipeline,
txPipeline: c.processTxPipeline,
})
return c
}
func (c *ClusterClient) Context() context.Context {
return c.ctx
}
func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.cmdable = clone.Process
clone.hooks.lock()
clone.ctx = ctx
return &clone
}
// Options returns read-only Options that were used to create the client.
func (c *ClusterClient) Options() *ClusterOptions {
return c.opt
@ -757,7 +887,7 @@ func (c *ClusterClient) Close() error {
return c.nodes.Close()
}
// Do creates a Cmd from the args and processes the cmd.
// Do creates a Cmd from the args and processes the cmd.
func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@ -765,13 +895,14 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.process)
err := c.processHook(ctx, cmd)
cmd.SetErr(err)
return err
}
func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
cmdInfo := c.cmdInfo(cmd.Name())
slot := c.cmdSlot(cmd)
cmdInfo := c.cmdInfo(ctx, cmd.Name())
slot := c.cmdSlot(ctx, cmd)
var node *clusterNode
var ask bool
var lastErr error
@ -791,12 +922,12 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
}
if ask {
ask = false
pipe := node.Client.Pipeline()
_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
_ = pipe.Process(ctx, cmd)
_, lastErr = pipe.Exec(ctx)
_ = pipe.Close()
ask = false
} else {
lastErr = node.Client.Process(ctx, cmd)
}
@ -851,6 +982,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
return lastErr
}
func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
c.nodes.OnNewNode(fn)
}
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachMaster(
@ -1056,30 +1191,9 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
return nil, firstErr
}
// reaper closes idle connections to the cluster.
func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
ticker := time.NewTicker(idleCheckFrequency)
defer ticker.Stop()
for range ticker.C {
nodes, err := c.nodes.All()
if err != nil {
break
}
for _, node := range nodes {
_, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
if err != nil {
internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
}
}
}
}
func (c *ClusterClient) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
@ -1090,13 +1204,9 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error)
}
func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
}
func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
cmdsMap := newCmdsMap()
err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
if err != nil {
if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
setCmdsErr(cmds, err)
return err
}
@ -1116,18 +1226,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
err := c._processPipelineNode(ctx, node, cmds, failedCmds)
if err == nil {
return
}
if attempt < c.opt.MaxRedirects {
if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
setCmdsErr(cmds, err)
}
} else {
setCmdsErr(cmds, err)
}
c.processPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@ -1147,9 +1246,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return err
}
if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
for _, cmd := range cmds {
slot := c.cmdSlot(cmd)
slot := c.cmdSlot(ctx, cmd)
node, err := c.slotReadOnlyNode(state, slot)
if err != nil {
return err
@ -1160,7 +1259,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
}
for _, cmd := range cmds {
slot := c.cmdSlot(cmd)
slot := c.cmdSlot(ctx, cmd)
node, err := state.slotMasterNode(slot)
if err != nil {
return err
@ -1170,9 +1269,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return nil
}
func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
for _, cmd := range cmds {
cmdInfo := c.cmdInfo(cmd.Name())
cmdInfo := c.cmdInfo(ctx, cmd.Name())
if cmdInfo == nil || !cmdInfo.ReadOnly {
return false
}
@ -1180,22 +1279,42 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
return true
}
func (c *ClusterClient) _processPipelineNode(
func (c *ClusterClient) processPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
) error {
return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
})
if err != nil {
return err
}
) {
_ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
cn, err := node.Client.getConn(ctx)
if err != nil {
_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
setCmdsErr(cmds, err)
return err
}
return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
})
})
var processErr error
defer func() {
node.Client.releaseConn(ctx, cn, processErr)
}()
processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
return processErr
})
}
func (c *ClusterClient) processPipelineNodeConn(
ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
}); err != nil {
if shouldRetry(err, true) {
_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
}
setCmdsErr(cmds, err)
return err
}
return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
})
}
@ -1206,7 +1325,7 @@ func (c *ClusterClient) pipelineReadCmds(
cmds []Cmder,
failedCmds *cmdsMap,
) error {
for _, cmd := range cmds {
for i, cmd := range cmds {
err := cmd.readReply(rd)
cmd.SetErr(err)
@ -1218,15 +1337,24 @@ func (c *ClusterClient) pipelineReadCmds(
continue
}
if c.opt.ReadOnly && isLoadingError(err) {
if c.opt.ReadOnly {
node.MarkAsFailing()
}
if !isRedisError(err) {
if shouldRetry(err, true) {
_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
}
setCmdsErr(cmds[i+1:], err)
return err
}
if isRedisError(err) {
continue
}
}
if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
return err
}
return nil
}
@ -1260,8 +1388,10 @@ func (c *ClusterClient) checkMovedErr(
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *ClusterClient) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
exec: func(ctx context.Context, cmds []Cmder) error {
cmds = wrapMultiExec(ctx, cmds)
return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
@ -1272,10 +1402,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
}
func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
// Trim multi .. exec.
cmds = cmds[1 : len(cmds)-1]
@ -1285,7 +1411,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return err
}
cmdsMap := c.mapCmdsBySlot(cmds)
cmdsMap := c.mapCmdsBySlot(ctx, cmds)
for slot, cmds := range cmdsMap {
node, err := state.slotMasterNode(slot)
if err != nil {
@ -1309,19 +1435,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
if err == nil {
return
}
if attempt < c.opt.MaxRedirects {
if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
setCmdsErr(cmds, err)
}
} else {
setCmdsErr(cmds, err)
}
c.processTxPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@ -1336,44 +1450,69 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return cmdsFirstErr(cmds)
}
func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
cmdsMap := make(map[int][]Cmder)
for _, cmd := range cmds {
slot := c.cmdSlot(cmd)
slot := c.cmdSlot(ctx, cmd)
cmdsMap[slot] = append(cmdsMap[slot], cmd)
}
return cmdsMap
}
func (c *ClusterClient) _processTxPipelineNode(
func (c *ClusterClient) processTxPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
) {
cmds = wrapMultiExec(ctx, cmds)
_ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
cn, err := node.Client.getConn(ctx)
if err != nil {
_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
setCmdsErr(cmds, err)
return err
}
var processErr error
defer func() {
node.Client.releaseConn(ctx, cn, processErr)
}()
processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
return processErr
})
}
func (c *ClusterClient) processTxPipelineNodeConn(
ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
})
if err != nil {
return err
if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
}); err != nil {
if shouldRetry(err, true) {
_ = c.mapCmdsByNode(ctx, failedCmds, cmds)
}
setCmdsErr(cmds, err)
return err
}
return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
statusCmd := cmds[0].(*StatusCmd)
// Trim multi and exec.
trimmedCmds := cmds[1 : len(cmds)-1]
if err := c.txPipelineReadQueued(
ctx, rd, statusCmd, trimmedCmds, failedCmds,
); err != nil {
setCmdsErr(cmds, err)
moved, ask, addr := isMovedError(err)
if moved || ask {
return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
}
return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
statusCmd := cmds[0].(*StatusCmd)
// Trim multi and exec.
cmds = cmds[1 : len(cmds)-1]
return err
}
err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
if err != nil {
moved, ask, addr := isMovedError(err)
if moved || ask {
return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
}
return err
}
return pipelineReadCmds(rd, cmds)
})
})
return pipelineReadCmds(rd, trimmedCmds)
})
}
@ -1406,12 +1545,7 @@ func (c *ClusterClient) txPipelineReadQueued(
return err
}
switch line[0] {
case proto.ErrorReply:
return proto.ParseErrorReply(line)
case proto.ArrayReply:
// ok
default:
if line[0] != proto.RespArray {
return fmt.Errorf("redis: expected '*', but got line %q", line)
}
@ -1568,6 +1702,15 @@ func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *Pub
return pubsub
}
// SSubscribe subscribes the client to the specified shard channels.
func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.SSubscribe(ctx, channels...)
}
return pubsub
}
func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
@ -1614,26 +1757,27 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo,
return nil, firstErr
}
func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
if err != nil {
internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
return nil
}
info := cmdsInfo[name]
if info == nil {
internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
}
return info
}
func (c *ClusterClient) cmdSlot(cmd Cmder) int {
func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
args := cmd.Args()
if args[0] == "cluster" && args[1] == "getkeysinslot" {
return args[2].(int)
}
cmdInfo := c.cmdInfo(cmd.Name())
cmdInfo := c.cmdInfo(ctx, cmd.Name())
return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
}
@ -1661,7 +1805,7 @@ func (c *ClusterClient) cmdNode(
return state.slotMasterNode(slot)
}
func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
if c.opt.RouteByLatency {
return state.slotClosestNode(slot)
}
@ -1708,6 +1852,13 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client,
return node.Client, err
}
func (c *ClusterClient) context(ctx context.Context) context.Context {
if c.opt.ContextTimeoutEnabled {
return ctx
}
return context.Background()
}
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {


@ -8,7 +8,7 @@ import (
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd := NewIntCmd(ctx, "dbsize")
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
_ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var size int64
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
n, err := master.DBSize(ctx).Result()
@ -30,8 +30,8 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
cmd := NewStringCmd(ctx, "script", "load", script)
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
mu := &sync.Mutex{}
_ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptLoad(ctx, script).Result()
if err != nil {
@ -56,7 +56,7 @@ func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCm
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
cmd := NewStatusCmd(ctx, "script", "flush")
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
_ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
return shard.ScriptFlush(ctx).Err()
})
@ -82,8 +82,8 @@ func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *Boo
result[i] = true
}
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
mu := &sync.Mutex{}
_ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptExists(ctx, hashes...).Result()
if err != nil {

5235 vendor/github.com/redis/go-redis/v9/command.go generated vendored Normal file

File diff suppressed because it is too large


@ -6,13 +6,24 @@ import (
"net"
"strings"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/proto"
)
// ErrClosed is returned when any operation is performed on a closed client.
var ErrClosed = pool.ErrClosed
// HasErrorPrefix checks if the err is a Redis error and the message contains a prefix.
func HasErrorPrefix(err error, prefix string) bool {
err, ok := err.(Error)
if !ok {
return false
}
msg := err.Error()
msg = strings.TrimPrefix(msg, "ERR ") // KVRocks adds such prefix
return strings.HasPrefix(msg, prefix)
}
type Error interface {
error
@ -91,7 +102,7 @@ func isBadConn(err error, allowTimeout bool, addr string) bool {
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
return !netErr.Temporary()
return false
}
}


@ -4,6 +4,8 @@ import (
"fmt"
"strconv"
"time"
"github.com/redis/go-redis/v9/internal/util"
)
func AppendArg(b []byte, v interface{}) []byte {
@ -11,7 +13,7 @@ func AppendArg(b []byte, v interface{}) []byte {
case nil:
return append(b, "<nil>"...)
case string:
return appendUTF8String(b, Bytes(v))
return appendUTF8String(b, util.StringToBytes(v))
case []byte:
return appendUTF8String(b, v)
case int:


@ -3,7 +3,7 @@ package hashtag
import (
"strings"
"github.com/go-redis/redis/v8/internal/rand"
"github.com/redis/go-redis/v9/internal/rand"
)
const slotNumber = 16384


@ -10,6 +10,12 @@ import (
// decoderFunc represents decoding functions for default built-in types.
type decoderFunc func(reflect.Value, string) error
// Scanner is the interface implemented by types that can scan themselves,
// overriding the default decoding behavior of decoderFunc.
type Scanner interface {
ScanRedis(s string) error
}
var (
// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
decoders = []decoderFunc{


@ -1,10 +1,13 @@
package hscan
import (
"encoding"
"fmt"
"reflect"
"strings"
"sync"
"github.com/redis/go-redis/v9/internal/util"
)
// structMap contains the map of struct fields for target structs
@ -84,7 +87,32 @@ func (s StructValue) Scan(key string, value string) error {
if !ok {
return nil
}
if err := field.fn(s.value.Field(field.index), value); err != nil {
v := s.value.Field(field.index)
isPtr := v.Kind() == reflect.Ptr
if isPtr && v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if !isPtr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
isPtr = true
}
if isPtr && v.Type().NumMethod() > 0 && v.CanInterface() {
switch scan := v.Interface().(type) {
case Scanner:
return scan.ScanRedis(value)
case encoding.TextUnmarshaler:
return scan.UnmarshalText(util.StringToBytes(value))
}
}
if isPtr {
v = v.Elem()
}
if err := field.fn(v, value); err != nil {
t := s.value.Type()
return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())


@ -3,7 +3,7 @@ package internal
import (
"time"
"github.com/go-redis/redis/v8/internal/rand"
"github.com/redis/go-redis/v9/internal/rand"
)
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {


@ -32,7 +32,9 @@ type Once struct {
// Do calls the function f if and only if Do has not been invoked
// without error for this instance of Once. In other words, given
// var once Once
//
// var once Once
//
// if once.Do(f) is called multiple times, only the first call will
// invoke f, even if f has a different value in each invocation unless
// f returns an error. A new instance of Once is required for each
@ -41,7 +43,8 @@ type Once struct {
// Do is intended for initialization that must be run exactly once. Since f
// is niladic, it may be necessary to use a function literal to capture the
// arguments to a function to be invoked by Do:
// err := config.once.Do(func() error { return config.init(filename) })
//
// err := config.once.Do(func() error { return config.init(filename) })
func (o *Once) Do(f func() error) error {
if atomic.LoadUint32(&o.done) == 1 {
return nil


@ -7,7 +7,7 @@ import (
"sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/redis/go-redis/v9/internal/proto"
)
var noDeadline = time.Time{}
@ -63,9 +63,13 @@ func (cn *Conn) RemoteAddr() net.Addr {
return nil
}
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
func (cn *Conn) WithReader(
ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error,
) error {
if timeout >= 0 {
if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
}
}
return fn(cn.rd)
}
@ -73,8 +77,10 @@ func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(r
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
if timeout >= 0 {
if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
}
}
if cn.bw.Buffered() > 0 {


@ -0,0 +1,50 @@
//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
package pool
import (
"errors"
"io"
"net"
"syscall"
"time"
)
var errUnexpectedRead = errors.New("unexpected read from socket")
func connCheck(conn net.Conn) error {
// Reset previous timeout.
_ = conn.SetDeadline(time.Time{})
sysConn, ok := conn.(syscall.Conn)
if !ok {
return nil
}
rawConn, err := sysConn.SyscallConn()
if err != nil {
return err
}
var sysErr error
if err := rawConn.Read(func(fd uintptr) bool {
var buf [1]byte
n, err := syscall.Read(int(fd), buf[:])
switch {
case n == 0 && err == nil:
sysErr = io.EOF
case n > 0:
sysErr = errUnexpectedRead
case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
sysErr = nil
default:
sysErr = err
}
return true
}); err != nil {
return err
}
return sysErr
}


@ -0,0 +1,10 @@
//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
package pool
import "net"
func connCheck(conn net.Conn) error {
return nil
}


@ -8,7 +8,7 @@ import (
"sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/redis/go-redis/v9/internal"
)
var (
@ -54,16 +54,15 @@ type Pooler interface {
}
type Options struct {
Dialer func(context.Context) (net.Conn, error)
OnClose func(*Conn) error
Dialer func(context.Context) (net.Conn, error)
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
PoolFIFO bool
PoolSize int
PoolTimeout time.Duration
MinIdleConns int
MaxIdleConns int
ConnMaxIdleTime time.Duration
ConnMaxLifetime time.Duration
}
type lastDialErrorWrap struct {
@ -71,66 +70,67 @@ type lastDialErrorWrap struct {
}
type ConnPool struct {
opt *Options
cfg *Options
dialErrorsNum uint32 // atomic
lastDialError atomic.Value
queue chan struct{}
connsMu sync.Mutex
conns []*Conn
idleConns []*Conn
connsMu sync.Mutex
conns []*Conn
idleConns []*Conn
poolSize int
idleConnsLen int
stats Stats
_closed uint32 // atomic
closedCh chan struct{}
_closed uint32 // atomic
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(opt *Options) *ConnPool {
p := &ConnPool{
opt: opt,
cfg: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
idleConns: make([]*Conn, 0, opt.PoolSize),
closedCh: make(chan struct{}),
}
p.connsMu.Lock()
p.checkMinIdleConns()
p.connsMu.Unlock()
if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
go p.reaper(opt.IdleCheckFrequency)
}
return p
}
func (p *ConnPool) checkMinIdleConns() {
if p.opt.MinIdleConns == 0 {
if p.cfg.MinIdleConns == 0 {
return
}
for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
p.poolSize++
p.idleConnsLen++
for p.poolSize < p.cfg.PoolSize && p.idleConnsLen < p.cfg.MinIdleConns {
select {
case p.queue <- struct{}{}:
p.poolSize++
p.idleConnsLen++
go func() {
err := p.addIdleConn()
if err != nil && err != ErrClosed {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
p.connsMu.Unlock()
}
}()
go func() {
err := p.addIdleConn()
if err != nil && err != ErrClosed {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
p.connsMu.Unlock()
}
p.freeTurn()
}()
default:
return
}
}
}
@ -176,7 +176,7 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
if p.poolSize >= p.opt.PoolSize {
if p.poolSize >= p.cfg.PoolSize {
cn.pooled = false
} else {
p.poolSize++
@ -191,14 +191,14 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
return nil, ErrClosed
}
if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.cfg.PoolSize) {
return nil, p.getLastDialError()
}
netConn, err := p.opt.Dialer(ctx)
netConn, err := p.cfg.Dialer(ctx)
if err != nil {
p.setLastDialError(err)
if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) {
go p.tryDial()
}
return nil, err
@ -215,7 +215,7 @@ func (p *ConnPool) tryDial() {
return
}
conn, err := p.opt.Dialer(context.Background())
conn, err := p.cfg.Dialer(context.Background())
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
@ -263,7 +263,7 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
break
}
if p.isStaleConn(cn) {
if !p.isHealthyConn(cn) {
_ = p.CloseConn(cn)
continue
}
@ -283,10 +283,6 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
return newcn, nil
}
func (p *ConnPool) getTurn() {
p.queue <- struct{}{}
}
func (p *ConnPool) waitTurn(ctx context.Context) error {
select {
case <-ctx.Done():
@ -301,7 +297,7 @@ func (p *ConnPool) waitTurn(ctx context.Context) error {
}
timer := timers.Get().(*time.Timer)
timer.Reset(p.opt.PoolTimeout)
timer.Reset(p.cfg.PoolTimeout)
select {
case <-ctx.Done():
@ -337,7 +333,7 @@ func (p *ConnPool) popIdle() (*Conn, error) {
}
var cn *Conn
if p.opt.PoolFIFO {
if p.cfg.PoolFIFO {
cn = p.idleConns[0]
copy(p.idleConns, p.idleConns[1:])
p.idleConns = p.idleConns[:n-1]
@ -363,14 +359,28 @@ func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
return
}
var shouldCloseConn bool
p.connsMu.Lock()
p.idleConns = append(p.idleConns, cn)
p.idleConnsLen++
if p.cfg.MaxIdleConns == 0 || p.idleConnsLen < p.cfg.MaxIdleConns {
p.idleConns = append(p.idleConns, cn)
p.idleConnsLen++
} else {
p.removeConn(cn)
shouldCloseConn = true
}
p.connsMu.Unlock()
p.freeTurn()
if shouldCloseConn {
_ = p.closeConn(cn)
}
}
func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
func (p *ConnPool) Remove(_ context.Context, cn *Conn, reason error) {
p.removeConnWithLock(cn)
p.freeTurn()
_ = p.closeConn(cn)
@ -383,8 +393,8 @@ func (p *ConnPool) CloseConn(cn *Conn) error {
func (p *ConnPool) removeConnWithLock(cn *Conn) {
p.connsMu.Lock()
defer p.connsMu.Unlock()
p.removeConn(cn)
p.connsMu.Unlock()
}
func (p *ConnPool) removeConn(cn *Conn) {
@ -395,15 +405,13 @@ func (p *ConnPool) removeConn(cn *Conn) {
p.poolSize--
p.checkMinIdleConns()
}
return
break
}
}
atomic.AddUint32(&p.stats.StaleConns, 1)
}
func (p *ConnPool) closeConn(cn *Conn) error {
if p.opt.OnClose != nil {
_ = p.opt.OnClose(cn)
}
return cn.Close()
}
@ -424,14 +432,13 @@ func (p *ConnPool) IdleLen() int {
}
func (p *ConnPool) Stats() *Stats {
idleLen := p.IdleLen()
return &Stats{
Hits: atomic.LoadUint32(&p.stats.Hits),
Misses: atomic.LoadUint32(&p.stats.Misses),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
IdleConns: uint32(idleLen),
IdleConns: uint32(p.IdleLen()),
StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
}
@ -459,7 +466,6 @@ func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
close(p.closedCh)
var firstErr error
p.connsMu.Lock()
@ -477,81 +483,20 @@ func (p *ConnPool) Close() error {
return firstErr
}
func (p *ConnPool) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
func (p *ConnPool) isHealthyConn(cn *Conn) bool {
now := time.Now()
for {
select {
case <-ticker.C:
// It is possible that ticker and closedCh arrive together,
// and select pseudo-randomly pick ticker case, we double
// check here to prevent being executed after closed.
if p.closed() {
return
}
_, err := p.ReapStaleConns()
if err != nil {
internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
continue
}
case <-p.closedCh:
return
}
if p.cfg.ConnMaxLifetime > 0 && now.Sub(cn.createdAt) >= p.cfg.ConnMaxLifetime {
return false
}
}
func (p *ConnPool) ReapStaleConns() (int, error) {
var n int
for {
p.getTurn()
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
p.freeTurn()
if cn != nil {
_ = p.closeConn(cn)
n++
} else {
break
}
}
atomic.AddUint32(&p.stats.StaleConns, uint32(n))
return n, nil
}
func (p *ConnPool) reapStaleConn() *Conn {
if len(p.idleConns) == 0 {
return nil
}
cn := p.idleConns[0]
if !p.isStaleConn(cn) {
return nil
}
p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
p.idleConnsLen--
p.removeConn(cn)
return cn
}
func (p *ConnPool) isStaleConn(cn *Conn) bool {
if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
if p.cfg.ConnMaxIdleTime > 0 && now.Sub(cn.UsedAt()) >= p.cfg.ConnMaxIdleTime {
return false
}
now := time.Now()
if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
return true
}
if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
return true
if connCheck(cn.netConn) != nil {
return false
}
return false
cn.SetUsedAt(now)
return true
}


@ -0,0 +1,552 @@
package proto
import (
"bufio"
"errors"
"fmt"
"io"
"math"
"math/big"
"strconv"
"github.com/redis/go-redis/v9/internal/util"
)
// redis resp protocol data type.
const (
RespStatus = '+' // +<string>\r\n
RespError = '-' // -<string>\r\n
RespString = '$' // $<length>\r\n<bytes>\r\n
RespInt = ':' // :<number>\r\n
RespNil = '_' // _\r\n
RespFloat = ',' // ,<floating-point-number>\r\n (golang float)
RespBool = '#' // true: #t\r\n false: #f\r\n
RespBlobError = '!' // !<length>\r\n<bytes>\r\n
RespVerbatim = '=' // =<length>\r\nFORMAT:<bytes>\r\n
RespBigInt = '(' // (<big number>\r\n
RespArray = '*' // *<len>\r\n... (same as resp2)
RespMap = '%' // %<len>\r\n(key)\r\n(value)\r\n... (golang map)
RespSet = '~' // ~<len>\r\n... (same as Array)
RespAttr = '|' // |<len>\r\n(key)\r\n(value)\r\n... + command reply
RespPush = '>' // ><len>\r\n... (same as Array)
)
// Not used yet: Redis does not currently use these two data types and will implement them later.
// Streamed = "EOF:"
// StreamedAggregated = '?'
//------------------------------------------------------------------------------
const Nil = RedisError("redis: nil") // nolint:errname
type RedisError string
func (e RedisError) Error() string { return string(e) }
func (RedisError) RedisError() {}
func ParseErrorReply(line []byte) error {
return RedisError(line[1:])
}
//------------------------------------------------------------------------------
type Reader struct {
rd *bufio.Reader
}
func NewReader(rd io.Reader) *Reader {
return &Reader{
rd: bufio.NewReader(rd),
}
}
func (r *Reader) Buffered() int {
return r.rd.Buffered()
}
func (r *Reader) Peek(n int) ([]byte, error) {
return r.rd.Peek(n)
}
func (r *Reader) Reset(rd io.Reader) {
r.rd.Reset(rd)
}
// PeekReplyType returns the data type of the next response without advancing the Reader,
// and discards the attribute type.
func (r *Reader) PeekReplyType() (byte, error) {
b, err := r.rd.Peek(1)
if err != nil {
return 0, err
}
if b[0] == RespAttr {
if err = r.DiscardNext(); err != nil {
return 0, err
}
return r.PeekReplyType()
}
return b[0], nil
}
// ReadLine returns a valid reply; it checks for protocol or Redis errors
// and discards the attribute type.
func (r *Reader) ReadLine() ([]byte, error) {
line, err := r.readLine()
if err != nil {
return nil, err
}
switch line[0] {
case RespError:
return nil, ParseErrorReply(line)
case RespNil:
return nil, Nil
case RespBlobError:
var blobErr string
blobErr, err = r.readStringReply(line)
if err == nil {
err = RedisError(blobErr)
}
return nil, err
case RespAttr:
if err = r.Discard(line); err != nil {
return nil, err
}
return r.ReadLine()
}
// Compatible with RESP2
if IsNilReply(line) {
return nil, Nil
}
return line, nil
}
// readLine returns an error if:
// - there is a pending read error;
// - or line does not end with \r\n.
func (r *Reader) readLine() ([]byte, error) {
b, err := r.rd.ReadSlice('\n')
if err != nil {
if err != bufio.ErrBufferFull {
return nil, err
}
full := make([]byte, len(b))
copy(full, b)
b, err = r.rd.ReadBytes('\n')
if err != nil {
return nil, err
}
full = append(full, b...) //nolint:makezero
b = full
}
if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
return nil, fmt.Errorf("redis: invalid reply: %q", b)
}
return b[:len(b)-2], nil
}
func (r *Reader) ReadReply() (interface{}, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case RespStatus:
return string(line[1:]), nil
case RespInt:
return util.ParseInt(line[1:], 10, 64)
case RespFloat:
return r.readFloat(line)
case RespBool:
return r.readBool(line)
case RespBigInt:
return r.readBigInt(line)
case RespString:
return r.readStringReply(line)
case RespVerbatim:
return r.readVerb(line)
case RespArray, RespSet, RespPush:
return r.readSlice(line)
case RespMap:
return r.readMap(line)
}
return nil, fmt.Errorf("redis: can't parse %.100q", line)
}
func (r *Reader) readFloat(line []byte) (float64, error) {
v := string(line[1:])
switch string(line[1:]) {
case "inf":
return math.Inf(1), nil
case "-inf":
return math.Inf(-1), nil
case "nan", "-nan":
return math.NaN(), nil
}
return strconv.ParseFloat(v, 64)
}
func (r *Reader) readBool(line []byte) (bool, error) {
switch string(line[1:]) {
case "t":
return true, nil
case "f":
return false, nil
}
return false, fmt.Errorf("redis: can't parse bool reply: %q", line)
}
func (r *Reader) readBigInt(line []byte) (*big.Int, error) {
i := new(big.Int)
if i, ok := i.SetString(string(line[1:]), 10); ok {
return i, nil
}
return nil, fmt.Errorf("redis: can't parse bigInt reply: %q", line)
}
func (r *Reader) readStringReply(line []byte) (string, error) {
n, err := replyLen(line)
if err != nil {
return "", err
}
b := make([]byte, n+2)
_, err = io.ReadFull(r.rd, b)
if err != nil {
return "", err
}
return util.BytesToString(b[:n]), nil
}
func (r *Reader) readVerb(line []byte) (string, error) {
s, err := r.readStringReply(line)
if err != nil {
return "", err
}
if len(s) < 4 || s[3] != ':' {
return "", fmt.Errorf("redis: can't parse verbatim string reply: %q", line)
}
return s[4:], nil
}
func (r *Reader) readSlice(line []byte) ([]interface{}, error) {
n, err := replyLen(line)
if err != nil {
return nil, err
}
val := make([]interface{}, n)
for i := 0; i < len(val); i++ {
v, err := r.ReadReply()
if err != nil {
if err == Nil {
val[i] = nil
continue
}
if err, ok := err.(RedisError); ok {
val[i] = err
continue
}
return nil, err
}
val[i] = v
}
return val, nil
}
func (r *Reader) readMap(line []byte) (map[interface{}]interface{}, error) {
n, err := replyLen(line)
if err != nil {
return nil, err
}
m := make(map[interface{}]interface{}, n)
for i := 0; i < n; i++ {
k, err := r.ReadReply()
if err != nil {
return nil, err
}
v, err := r.ReadReply()
if err != nil {
if err == Nil {
m[k] = nil
continue
}
if err, ok := err.(RedisError); ok {
m[k] = err
continue
}
return nil, err
}
m[k] = v
}
return m, nil
}
// -------------------------------
func (r *Reader) ReadInt() (int64, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case RespInt, RespStatus:
return util.ParseInt(line[1:], 10, 64)
case RespString:
s, err := r.readStringReply(line)
if err != nil {
return 0, err
}
return util.ParseInt([]byte(s), 10, 64)
case RespBigInt:
b, err := r.readBigInt(line)
if err != nil {
return 0, err
}
if !b.IsInt64() {
return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
}
return b.Int64(), nil
}
return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
}
func (r *Reader) ReadUint() (uint64, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case RespInt, RespStatus:
return util.ParseUint(line[1:], 10, 64)
case RespString:
s, err := r.readStringReply(line)
if err != nil {
return 0, err
}
return util.ParseUint([]byte(s), 10, 64)
case RespBigInt:
b, err := r.readBigInt(line)
if err != nil {
return 0, err
}
if !b.IsUint64() {
return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
}
return b.Uint64(), nil
}
return 0, fmt.Errorf("redis: can't parse uint reply: %.100q", line)
}
func (r *Reader) ReadFloat() (float64, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case RespFloat:
return r.readFloat(line)
case RespStatus:
return strconv.ParseFloat(string(line[1:]), 64)
case RespString:
s, err := r.readStringReply(line)
if err != nil {
return 0, err
}
return strconv.ParseFloat(s, 64)
}
return 0, fmt.Errorf("redis: can't parse float reply: %.100q", line)
}
func (r *Reader) ReadString() (string, error) {
line, err := r.ReadLine()
if err != nil {
return "", err
}
switch line[0] {
case RespStatus, RespInt, RespFloat:
return string(line[1:]), nil
case RespString:
return r.readStringReply(line)
case RespBool:
b, err := r.readBool(line)
return strconv.FormatBool(b), err
case RespVerbatim:
return r.readVerb(line)
case RespBigInt:
b, err := r.readBigInt(line)
if err != nil {
return "", err
}
return b.String(), nil
}
return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
}
func (r *Reader) ReadBool() (bool, error) {
s, err := r.ReadString()
if err != nil {
return false, err
}
return s == "OK" || s == "1" || s == "true", nil
}
func (r *Reader) ReadSlice() ([]interface{}, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
return r.readSlice(line)
}
// ReadFixedArrayLen reads the array length and checks that it equals fixedLen.
func (r *Reader) ReadFixedArrayLen(fixedLen int) error {
n, err := r.ReadArrayLen()
if err != nil {
return err
}
if n != fixedLen {
return fmt.Errorf("redis: got %d elements in the array, wanted %d", n, fixedLen)
}
return nil
}
// ReadArrayLen reads and returns the length of the array.
func (r *Reader) ReadArrayLen() (int, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case RespArray, RespSet, RespPush:
return replyLen(line)
default:
return 0, fmt.Errorf("redis: can't parse array/set/push reply: %.100q", line)
}
}
// ReadFixedMapLen reads fixed map length.
func (r *Reader) ReadFixedMapLen(fixedLen int) error {
n, err := r.ReadMapLen()
if err != nil {
return err
}
if n != fixedLen {
return fmt.Errorf("redis: got %d elements in the map, wanted %d", n, fixedLen)
}
return nil
}
// ReadMapLen reads the length of the map type.
// If responding to the array type (RespArray/RespSet/RespPush),
// it must be a multiple of 2 and return n/2.
// Other types will return an error.
func (r *Reader) ReadMapLen() (int, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case RespMap:
return replyLen(line)
case RespArray, RespSet, RespPush:
// Some commands and RESP2 protocol may respond to array types.
n, err := replyLen(line)
if err != nil {
return 0, err
}
if n%2 != 0 {
return 0, fmt.Errorf("redis: the length of the array must be a multiple of 2, got: %d", n)
}
return n / 2, nil
default:
return 0, fmt.Errorf("redis: can't parse map reply: %.100q", line)
}
}
// DiscardNext reads and discards the data represented by the next line.
func (r *Reader) DiscardNext() error {
line, err := r.readLine()
if err != nil {
return err
}
return r.Discard(line)
}
// Discard discards the data represented by line.
func (r *Reader) Discard(line []byte) (err error) {
if len(line) == 0 {
return errors.New("redis: invalid line")
}
switch line[0] {
case RespStatus, RespError, RespInt, RespNil, RespFloat, RespBool, RespBigInt:
return nil
}
n, err := replyLen(line)
if err != nil && err != Nil {
return err
}
switch line[0] {
case RespBlobError, RespString, RespVerbatim:
// +\r\n
_, err = r.rd.Discard(n + 2)
return err
case RespArray, RespSet, RespPush:
for i := 0; i < n; i++ {
if err = r.DiscardNext(); err != nil {
return err
}
}
return nil
case RespMap, RespAttr:
// Read key & value.
for i := 0; i < n*2; i++ {
if err = r.DiscardNext(); err != nil {
return err
}
}
return nil
}
return fmt.Errorf("redis: can't parse %.100q", line)
}
func replyLen(line []byte) (n int, err error) {
n, err = util.Atoi(line[1:])
if err != nil {
return 0, err
}
if n < -1 {
return 0, fmt.Errorf("redis: invalid reply: %q", line)
}
switch line[0] {
case RespString, RespVerbatim, RespBlobError,
RespArray, RespSet, RespPush, RespMap, RespAttr:
if n == -1 {
return 0, Nil
}
}
return n, nil
}
// IsNilReply detects the redis.Nil reply of RESP2.
func IsNilReply(line []byte) bool {
return len(line) == 3 &&
(line[0] == RespString || line[0] == RespArray) &&
line[1] == '-' && line[2] == '1'
}
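As a standalone sketch (the proto package above is internal and not importable), the RESP2/RESP3 map-length halving that ReadMapLen implements can be illustrated like this; the function mirrors, but is not, the real reader:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// mapLenFromLine mimics ReadMapLen: RESP3 maps ('%') carry the pair count
// directly, while RESP2 array replies ('*') interleave keys and values as
// 2n flat elements, so the element count must be halved.
func mapLenFromLine(line []byte) (int, error) {
	n, err := strconv.Atoi(string(line[1:]))
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case '%': // RESP3 map: n is already the number of key/value pairs
		return n, nil
	case '*': // RESP2 array: keys and values are interleaved
		if n%2 != 0 {
			return 0, errors.New("array length must be a multiple of 2")
		}
		return n / 2, nil
	}
	return 0, fmt.Errorf("unexpected reply type %q", line[0])
}

func main() {
	fmt.Println(mapLenFromLine([]byte("%2"))) // 2 <nil>
	fmt.Println(mapLenFromLine([]byte("*4"))) // 2 <nil>
}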

View File

@ -3,13 +3,15 @@ package proto
import (
"encoding"
"fmt"
"net"
"reflect"
"time"
"github.com/go-redis/redis/v8/internal/util"
"github.com/redis/go-redis/v9/internal/util"
)
// Scan parses bytes `b` into `v` with the appropriate type.
//
//nolint:gocyclo
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
@ -115,6 +117,9 @@ func Scan(b []byte, v interface{}) error {
return nil
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
case *net.IP:
*v = b
return nil
default:
return fmt.Errorf(
"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)

View File

@ -4,16 +4,17 @@ import (
"encoding"
"fmt"
"io"
"net"
"strconv"
"time"
"github.com/go-redis/redis/v8/internal/util"
"github.com/redis/go-redis/v9/internal/util"
)
type writer interface {
io.Writer
io.ByteWriter
// io.StringWriter
// WriteString implements io.StringWriter.
WriteString(s string) (n int, err error)
}
@ -34,7 +35,7 @@ func NewWriter(wr writer) *Writer {
}
func (w *Writer) WriteArgs(args []interface{}) error {
if err := w.WriteByte(ArrayReply); err != nil {
if err := w.WriteByte(RespArray); err != nil {
return err
}
@ -106,6 +107,8 @@ func (w *Writer) WriteArg(v interface{}) error {
return err
}
return w.bytes(b)
case net.IP:
return w.bytes(v)
default:
return fmt.Errorf(
"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
@ -113,7 +116,7 @@ func (w *Writer) WriteArg(v interface{}) error {
}
func (w *Writer) bytes(b []byte) error {
if err := w.WriteByte(StringReply); err != nil {
if err := w.WriteByte(RespString); err != nil {
return err
}
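Taken together, the two net.IP cases added above (in proto.Scan and Writer.WriteArg) let client code round-trip IP addresses without manual conversion. A minimal sketch, assuming a reachable Redis at localhost:6379; the key name is illustrative:

package main

import (
	"context"
	"fmt"
	"net"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// SET marshals net.IP via the new WriteArg case.
	if err := rdb.Set(ctx, "example:ip", net.ParseIP("192.0.2.1"), 0).Err(); err != nil {
		panic(err)
	}

	// GET unmarshals into *net.IP via the new Scan case.
	var ip net.IP
	if err := rdb.Get(ctx, "example:ip").Scan(&ip); err != nil {
		panic(err)
	}
	fmt.Println(ip) // 192.0.2.1
}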

View File

@ -4,7 +4,7 @@ import (
"context"
"time"
"github.com/go-redis/redis/v8/internal/util"
"github.com/redis/go-redis/v9/internal/util"
)
func Sleep(ctx context.Context, dur time.Duration) error {

View File

@ -2,30 +2,21 @@ package redis
import (
"context"
"sync"
)
// ScanIterator is used to incrementally iterate over a collection of elements.
// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
mu sync.Mutex // protects Scanner and pos
cmd *ScanCmd
pos int
}
// Err returns the last iterator error, if any.
func (it *ScanIterator) Err() error {
it.mu.Lock()
err := it.cmd.Err()
it.mu.Unlock()
return err
return it.cmd.Err()
}
// Next advances the cursor and returns true if more values can be read.
func (it *ScanIterator) Next(ctx context.Context) bool {
it.mu.Lock()
defer it.mu.Unlock()
// Instantly return on errors.
if it.cmd.Err() != nil {
return false
@ -68,10 +59,8 @@ func (it *ScanIterator) Next(ctx context.Context) bool {
// Val returns the key/field at the current cursor position.
func (it *ScanIterator) Val() string {
var v string
it.mu.Lock()
if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
v = it.cmd.page[it.pos-1]
}
it.mu.Unlock()
return v
}

View File

@ -13,7 +13,7 @@ import (
"strings"
"time"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/redis/go-redis/v9/internal/pool"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@ -27,7 +27,7 @@ type Limiter interface {
ReportResult(result error)
}
// Options keeps the settings to setup redis connection.
// Options keeps the settings to set up redis connection.
type Options struct {
// The network type, either tcp or unix.
// Default is tcp.
@ -35,6 +35,9 @@ type Options struct {
// host:port address.
Addr string
// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName string
// Dialer creates a new network connection and has priority over
// Network and Addr options.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
@ -42,6 +45,9 @@ type Options struct {
// Hook that is called when a new connection is established.
OnConnect func(ctx context.Context, cn *Conn) error
// Protocol 2 or 3. Use the version to negotiate RESP version with redis-server.
// Default is 3.
Protocol int
// Use the specified Username to authenticate the current connection
// with one of the connections defined in the ACL list when connecting
// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
@ -51,6 +57,9 @@ type Options struct {
// or the User Password when connecting to a Redis 6.0 instance, or greater,
// that is using the Redis ACL system.
Password string
// CredentialsProvider allows the username and password to be updated
// before reconnecting. It should return the current username and password.
CredentialsProvider func() (username string, password string)
// Database to be selected after connecting to the server.
DB int
@ -69,49 +78,64 @@ type Options struct {
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads. If reached, commands will fail
// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
// with a timeout instead of blocking. Supported values:
// - `0` - default timeout (3 seconds).
// - `-1` - no timeout (block indefinitely).
// - `-2` - disables SetReadDeadline calls completely.
ReadTimeout time.Duration
// Timeout for socket writes. If reached, commands will fail
// with a timeout instead of blocking.
// Default is ReadTimeout.
// with a timeout instead of blocking. Supported values:
// - `0` - default timeout (3 seconds).
// - `-1` - no timeout (block indefinitely).
// - `-2` - disables SetWriteDeadline calls completely.
WriteTimeout time.Duration
// ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines.
// See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts
ContextTimeoutEnabled bool
// Type of connection pool.
// true for FIFO pool, false for LIFO pool.
// Note that fifo has higher overhead compared to lifo.
// Note that FIFO has slightly higher overhead compared to LIFO,
// but it helps close idle connections faster, reducing the pool size.
PoolFIFO bool
// Maximum number of socket connections.
// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
PoolSize int
// Minimum number of idle connections, which is useful when establishing
// new connections is slow.
MinIdleConns int
// Connection age at which client retires (closes) the connection.
// Default is to not close aged connections.
MaxConnAge time.Duration
// Amount of time client waits for connection if all connections
// are busy before returning an error.
// Default is ReadTimeout + 1 second.
PoolTimeout time.Duration
// Amount of time after which client closes idle connections.
// Minimum number of idle connections, which is useful when establishing
// new connections is slow.
// Default is 0; no minimum is maintained by default.
MinIdleConns int
// Maximum number of idle connections.
// Default is 0, meaning there is no limit and idle connections are not closed.
MaxIdleConns int
// ConnMaxIdleTime is the maximum amount of time a connection may be idle.
// Should be less than server's timeout.
// Default is 5 minutes. -1 disables idle timeout check.
IdleTimeout time.Duration
// Frequency of idle checks made by idle connections reaper.
// Default is 1 minute. -1 disables idle connections reaper,
// but idle connections are still discarded by the client
// if IdleTimeout is set.
IdleCheckFrequency time.Duration
//
// Expired connections may be closed lazily before reuse.
// If <= 0, connections are not closed due to a connection's idle time.
//
// Default is 30 minutes. -1 disables idle timeout check.
ConnMaxIdleTime time.Duration
// ConnMaxLifetime is the maximum amount of time a connection may be reused.
//
// Expired connections may be closed lazily before reuse.
// If <= 0, connections are not closed due to a connection's age.
//
// Default is to not close idle connections.
ConnMaxLifetime time.Duration
// Enables read only queries on slave nodes.
readOnly bool
// TLS Config to use. When set TLS will be negotiated.
// TLS Config to use. When set, TLS will be negotiated.
TLSConfig *tls.Config
// Limiter interface used to implemented circuit breaker or rate limiter.
// Limiter interface used to implement circuit breaker or rate limiter.
Limiter Limiter
// Enables read only queries on slave/follower nodes.
readOnly bool
}
func (opt *Options) init() {
@ -129,40 +153,36 @@ func (opt *Options) init() {
opt.DialTimeout = 5 * time.Second
}
if opt.Dialer == nil {
opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
netDialer := &net.Dialer{
Timeout: opt.DialTimeout,
KeepAlive: 5 * time.Minute,
}
if opt.TLSConfig == nil {
return netDialer.DialContext(ctx, network, addr)
}
return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
}
opt.Dialer = NewDialer(opt)
}
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
case -2:
opt.ReadTimeout = -1
case -1:
opt.ReadTimeout = 0
case 0:
opt.ReadTimeout = 3 * time.Second
}
switch opt.WriteTimeout {
case -2:
opt.WriteTimeout = -1
case -1:
opt.WriteTimeout = 0
case 0:
opt.WriteTimeout = opt.ReadTimeout
}
if opt.PoolTimeout == 0 {
opt.PoolTimeout = opt.ReadTimeout + time.Second
if opt.ReadTimeout > 0 {
opt.PoolTimeout = opt.ReadTimeout + time.Second
} else {
opt.PoolTimeout = 30 * time.Second
}
}
if opt.IdleTimeout == 0 {
opt.IdleTimeout = 5 * time.Minute
}
if opt.IdleCheckFrequency == 0 {
opt.IdleCheckFrequency = time.Minute
if opt.ConnMaxIdleTime == 0 {
opt.ConnMaxIdleTime = 30 * time.Minute
}
if opt.MaxRetries == -1 {
@ -189,36 +209,57 @@ func (opt *Options) clone() *Options {
return &clone
}
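A sketch of the v9 field renames and the new timeout sentinels from a caller's perspective; the values are illustrative, not recommendations:

package main

import (
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:            "localhost:6379",
		ReadTimeout:     -2,               // new sentinel: never call SetReadDeadline
		WriteTimeout:    0,                // 0 still means the 3s default
		ConnMaxIdleTime: 10 * time.Minute, // replaces v8 IdleTimeout
		ConnMaxLifetime: time.Hour,        // replaces v8 MaxConnAge
		MaxIdleConns:    8,                // new in v9
	})
	defer rdb.Close()
}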
// NewDialer returns a function that will be used as the default dialer
// when none is specified in Options.Dialer.
func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) {
return func(ctx context.Context, network, addr string) (net.Conn, error) {
netDialer := &net.Dialer{
Timeout: opt.DialTimeout,
KeepAlive: 5 * time.Minute,
}
if opt.TLSConfig == nil {
return netDialer.DialContext(ctx, network, addr)
}
return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
}
}
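Because NewDialer is now exported, a custom Dialer can wrap the default instead of re-implementing the TLS branch. A sketch; the logging is illustrative:

package main

import (
	"context"
	"log"
	"net"

	"github.com/redis/go-redis/v9"
)

func main() {
	opt := &redis.Options{Addr: "localhost:6379"}
	base := redis.NewDialer(opt) // default dialer, including the TLS handling
	opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
		log.Printf("dialing %s %s", network, addr)
		return base(ctx, network, addr)
	}
	rdb := redis.NewClient(opt)
	defer rdb.Close()
}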
// ParseURL parses a URL into Options that can be used to connect to Redis.
// Scheme is required.
// There are two connection types: by TCP socket and by Unix socket.
// TCP connection:
// redis://<user>:<password>@<host>:<port>/<db_number>
//
// redis://<user>:<password>@<host>:<port>/<db_number>
//
// Unix connection:
// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
//
// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
//
// Most Option fields can be set using query parameters, with the following restrictions:
// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
// - only scalar type fields are supported (bool, int, time.Duration)
// - for time.Duration fields, values must be a valid input for time.ParseDuration();
// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
// - to disable a duration field, use value less than or equal to 0; to use the default
// value, leave the value blank or remove the parameter
// - only the last value is interpreted if a parameter is given multiple times
// - fields "network", "addr", "username" and "password" can only be set using other
// URL attributes (scheme, host, userinfo, resp.), query parameters using these
// names will be treated as unknown parameters
// - unknown parameter names will result in an error
// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
// - only scalar type fields are supported (bool, int, time.Duration)
// - for time.Duration fields, values must be a valid input for time.ParseDuration();
// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
// - to disable a duration field, use value less than or equal to 0; to use the default
// value, leave the value blank or remove the parameter
// - only the last value is interpreted if a parameter is given multiple times
// - fields "network", "addr", "username" and "password" can only be set using other
// URL attributes (scheme, host, userinfo, resp.), query parameters using these
// names will be treated as unknown parameters
// - unknown parameter names will result in an error
//
// Examples:
// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
// is equivalent to:
// &Options{
// Network: "tcp",
// Addr: "localhost:6789",
// DB: 1, // path "/3" was overridden by "&db=1"
// DialTimeout: 3 * time.Second, // no time unit = seconds
// ReadTimeout: 6 * time.Second,
// MaxRetries: 2,
// }
//
// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
// is equivalent to:
// &Options{
// Network: "tcp",
// Addr: "localhost:6789",
// DB: 1, // path "/3" was overridden by "&db=1"
// DialTimeout: 3 * time.Second, // no time unit = seconds
// ReadTimeout: 6 * time.Second,
// MaxRetries: 2,
// }
func ParseURL(redisURL string) (*Options, error) {
u, err := url.Parse(redisURL)
if err != nil {
@ -240,16 +281,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
o.Username, o.Password = getUserPassword(u)
h, p, err := net.SplitHostPort(u.Host)
if err != nil {
h = u.Host
}
if h == "" {
h = "localhost"
}
if p == "" {
p = "6379"
}
h, p := getHostPortWithDefaults(u)
o.Addr = net.JoinHostPort(h, p)
f := strings.FieldsFunc(u.Path, func(r rune) bool {
@ -259,6 +291,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
case 0:
o.DB = 0
case 1:
var err error
if o.DB, err = strconv.Atoi(f[0]); err != nil {
return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
}
@ -267,12 +300,32 @@ func setupTCPConn(u *url.URL) (*Options, error) {
}
if u.Scheme == "rediss" {
o.TLSConfig = &tls.Config{ServerName: h}
o.TLSConfig = &tls.Config{
ServerName: h,
MinVersion: tls.VersionTLS12,
}
}
return setupConnParams(u, o)
}
// getHostPortWithDefaults is a helper function that splits the URL into
// a host and a port. If the host is missing, it defaults to localhost
// and if the port is missing, it defaults to 6379.
func getHostPortWithDefaults(u *url.URL) (string, string) {
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
host = u.Host
}
if host == "" {
host = "localhost"
}
if port == "" {
port = "6379"
}
return host, port
}
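A minimal ParseURL usage sketch matching the doc comment above; the DSN, including the new protocol and client_name query parameters, is illustrative:

package main

import (
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	opt, err := redis.ParseURL("redis://user:password@localhost:6789/3?dial_timeout=3&read_timeout=6s&protocol=3&client_name=example")
	if err != nil {
		panic(err)
	}
	fmt.Println(opt.Addr, opt.DB, opt.DialTimeout, opt.ReadTimeout)

	rdb := redis.NewClient(opt)
	defer rdb.Close()
}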
func setupUnixConn(u *url.URL) (*Options, error) {
o := &Options{
Network: "unix",
@ -291,6 +344,10 @@ type queryOptions struct {
err error
}
func (o *queryOptions) has(name string) bool {
return len(o.q[name]) > 0
}
func (o *queryOptions) string(name string) string {
vs := o.q[name]
if len(vs) == 0 {
@ -300,6 +357,12 @@ func (o *queryOptions) string(name string) string {
return vs[len(vs)-1]
}
func (o *queryOptions) strings(name string) []string {
vs := o.q[name]
delete(o.q, name)
return vs
}
func (o *queryOptions) int(name string) int {
s := o.string(name)
if s == "" {
@ -377,6 +440,8 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) {
o.DB = db
}
o.Protocol = q.int("protocol")
o.ClientName = q.string("client_name")
o.MaxRetries = q.int("max_retries")
o.MinRetryBackoff = q.duration("min_retry_backoff")
o.MaxRetryBackoff = q.duration("max_retry_backoff")
@ -385,11 +450,19 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) {
o.WriteTimeout = q.duration("write_timeout")
o.PoolFIFO = q.bool("pool_fifo")
o.PoolSize = q.int("pool_size")
o.MinIdleConns = q.int("min_idle_conns")
o.MaxConnAge = q.duration("max_conn_age")
o.PoolTimeout = q.duration("pool_timeout")
o.IdleTimeout = q.duration("idle_timeout")
o.IdleCheckFrequency = q.duration("idle_check_frequency")
o.MinIdleConns = q.int("min_idle_conns")
o.MaxIdleConns = q.int("max_idle_conns")
if q.has("conn_max_idle_time") {
o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
} else {
o.ConnMaxIdleTime = q.duration("idle_timeout")
}
if q.has("conn_max_lifetime") {
o.ConnMaxLifetime = q.duration("conn_max_lifetime")
} else {
o.ConnMaxLifetime = q.duration("max_conn_age")
}
if q.err != nil {
return nil, q.err
}
@ -413,17 +486,20 @@ func getUserPassword(u *url.URL) (string, string) {
return user, password
}
func newConnPool(opt *Options) *pool.ConnPool {
func newConnPool(
opt *Options,
dialer func(ctx context.Context, network, addr string) (net.Conn, error),
) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
return opt.Dialer(ctx, opt.Network, opt.Addr)
return dialer(ctx, opt.Network, opt.Addr)
},
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
MinIdleConns: opt.MinIdleConns,
MaxIdleConns: opt.MaxIdleConns,
ConnMaxIdleTime: opt.ConnMaxIdleTime,
ConnMaxLifetime: opt.ConnMaxLifetime,
})
}

View File

@ -1,8 +1,8 @@
{
"name": "redis",
"version": "8.11.5",
"version": "9.1.0",
"main": "index.js",
"repository": "git@github.com:go-redis/redis.git",
"repository": "git@github.com:redis/go-redis.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
"license": "BSD-2-clause"
}

View File

@ -2,9 +2,7 @@ package redis
import (
"context"
"sync"
"github.com/go-redis/redis/v8/internal/pool"
"errors"
)
type pipelineExecer func(context.Context, []Cmder) error
@ -13,7 +11,7 @@ type pipelineExecer func(context.Context, []Cmder) error
//
// Pipelining is a technique to greatly speed up processing by packing
// operations into batches, sending them at once to Redis, and reading the
// replies in a single step.
// See https://redis.io/topics/pipelining
//
// Pay attention, that Pipeline is not a transaction, so you can get unexpected
@ -24,29 +22,35 @@ type pipelineExecer func(context.Context, []Cmder) error
// depends on your batch size and/or use TxPipeline.
type Pipeliner interface {
StatefulCmdable
// Len returns the number of commands in the pipeline that have not yet been executed.
Len() int
// Do is an API for executing any command.
// If a certain Redis command is not yet supported, you can use Do to execute it.
Do(ctx context.Context, args ...interface{}) *Cmd
// Process queues the command to be executed in the pipeline buffer.
Process(ctx context.Context, cmd Cmder) error
Close() error
Discard() error
// Discard drops all commands in the buffer that have not yet been executed.
Discard()
// Exec sends all the commands buffered in the pipeline to the redis-server.
Exec(ctx context.Context) ([]Cmder, error)
}
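A single-goroutine usage sketch of the interface above, assuming a local Redis; the key name is illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	var incr *redis.IntCmd
	_, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		incr = pipe.Incr(ctx, "pipelined_counter")       // queued
		pipe.Expire(ctx, "pipelined_counter", time.Hour) // queued
		return nil                                       // both sent in one round trip
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(incr.Val())
}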
var _ Pipeliner = (*Pipeline)(nil)
// Pipeline implements pipelining as described in
// http://redis.io/topics/pipelining. It's safe for concurrent use
// by multiple goroutines.
// http://redis.io/topics/pipelining.
// Please note: it is not safe for concurrent use by multiple goroutines.
type Pipeline struct {
cmdable
statefulCmdable
ctx context.Context
exec pipelineExecer
mu sync.Mutex
cmds []Cmder
closed bool
cmds []Cmder
}
func (c *Pipeline) init() {
@ -56,50 +60,29 @@ func (c *Pipeline) init() {
// Len returns the number of queued commands.
func (c *Pipeline) Len() int {
c.mu.Lock()
ln := len(c.cmds)
c.mu.Unlock()
return ln
return len(c.cmds)
}
// Do queues the custom command for later execution.
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
if len(args) == 0 {
cmd.SetErr(errors.New("redis: please enter the command to be executed"))
return cmd
}
_ = c.Process(ctx, cmd)
return cmd
}
// Process queues the cmd for later execution.
func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
c.mu.Lock()
c.cmds = append(c.cmds, cmd)
c.mu.Unlock()
return nil
}
// Close closes the pipeline, releasing any open resources.
func (c *Pipeline) Close() error {
c.mu.Lock()
_ = c.discard()
c.closed = true
c.mu.Unlock()
return nil
}
// Discard resets the pipeline and discards queued commands.
func (c *Pipeline) Discard() error {
c.mu.Lock()
err := c.discard()
c.mu.Unlock()
return err
}
func (c *Pipeline) discard() error {
if c.closed {
return pool.ErrClosed
}
func (c *Pipeline) Discard() {
c.cmds = c.cmds[:0]
return nil
}
// Exec executes all previously queued commands using one
@ -108,13 +91,6 @@ func (c *Pipeline) discard() error {
// Exec always returns list of commands and error of the first failed
// command if any.
func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return nil, pool.ErrClosed
}
if len(c.cmds) == 0 {
return nil, nil
}
@ -129,9 +105,7 @@ func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]C
if err := fn(c); err != nil {
return nil, err
}
cmds, err := c.Exec(ctx)
_ = c.Close()
return cmds, err
return c.Exec(ctx)
}
func (c *Pipeline) Pipeline() Pipeliner {

1433
vendor/github.com/redis/go-redis/v9/probabilistic.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -7,9 +7,9 @@ import (
"sync"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/redis/go-redis/v9/internal"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/proto"
)
// PubSub implements Pub/Sub commands as described in
@ -24,10 +24,11 @@ type PubSub struct {
newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
closeConn func(*pool.Conn) error
mu sync.Mutex
cn *pool.Conn
channels map[string]struct{}
patterns map[string]struct{}
mu sync.Mutex
cn *pool.Conn
channels map[string]struct{}
patterns map[string]struct{}
schannels map[string]struct{}
closed bool
exit chan struct{}
@ -46,6 +47,7 @@ func (c *PubSub) init() {
func (c *PubSub) String() string {
channels := mapKeys(c.channels)
channels = append(channels, mapKeys(c.patterns)...)
channels = append(channels, mapKeys(c.schannels)...)
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
@ -82,7 +84,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er
}
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
}
@ -101,6 +103,13 @@ func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
}
}
if len(c.schannels) > 0 {
err := c._subscribe(ctx, cn, "ssubscribe", mapKeys(c.schannels))
if err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
@ -208,15 +217,38 @@ func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
return err
}
// SSubscribe subscribes the client to the specified shard channels.
func (c *PubSub) SSubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
err := c.subscribe(ctx, "ssubscribe", channels...)
if c.schannels == nil {
c.schannels = make(map[string]struct{})
}
for _, s := range channels {
c.schannels[s] = struct{}{}
}
return err
}
// Unsubscribe the client from the given channels, or from all of
// them if none is given.
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
for _, channel := range channels {
delete(c.channels, channel)
if len(channels) > 0 {
for _, channel := range channels {
delete(c.channels, channel)
}
} else {
// Unsubscribe from all channels.
for channel := range c.channels {
delete(c.channels, channel)
}
}
err := c.subscribe(ctx, "unsubscribe", channels...)
return err
}
@ -227,13 +259,42 @@ func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
for _, pattern := range patterns {
delete(c.patterns, pattern)
if len(patterns) > 0 {
for _, pattern := range patterns {
delete(c.patterns, pattern)
}
} else {
// Unsubscribe from all patterns.
for pattern := range c.patterns {
delete(c.patterns, pattern)
}
}
err := c.subscribe(ctx, "punsubscribe", patterns...)
return err
}
// SUnsubscribe unsubscribes the client from the given shard channels,
// or from all of them if none is given.
func (c *PubSub) SUnsubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
if len(channels) > 0 {
for _, channel := range channels {
delete(c.schannels, channel)
}
} else {
// Unsubscribe from all channels.
for channel := range c.schannels {
delete(c.schannels, channel)
}
}
err := c.subscribe(ctx, "sunsubscribe", channels...)
return err
}
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
cn, err := c.conn(ctx, channels)
if err != nil {
@ -311,7 +372,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
}, nil
case []interface{}:
switch kind := reply[0].(string); kind {
case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
case "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe", "sunsubscribe":
// Can be nil in case of "unsubscribe".
channel, _ := reply[1].(string)
return &Subscription{
@ -319,7 +380,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
Channel: channel,
Count: int(reply[2].(int64)),
}, nil
case "message":
case "message", "smessage":
switch payload := reply[2].(type) {
case string:
return &Message{
@ -371,7 +432,7 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int
return nil, err
}
err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error {
return c.cmd.readReply(rd)
})
@ -456,9 +517,9 @@ func (c *PubSub) ChannelSize(size int) <-chan *Message {
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interface{} {
c.chOnce.Do(func() {
c.allCh = newChannel(c, WithChannelSize(size))
c.allCh = newChannel(c, opts...)
c.allCh.initAllChan()
})
if c.allCh == nil {

827
vendor/github.com/redis/go-redis/v9/redis.go generated vendored Normal file
View File

@ -0,0 +1,827 @@
package redis
import (
"context"
"errors"
"fmt"
"net"
"sync/atomic"
"time"
"github.com/redis/go-redis/v9/internal"
"github.com/redis/go-redis/v9/internal/hscan"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/proto"
)
// Scanner is the internal/hscan.Scanner interface exposed to consumers.
type Scanner = hscan.Scanner
// Nil reply returned by Redis when key does not exist.
const Nil = proto.Nil
// SetLogger sets a custom logger.
func SetLogger(logger internal.Logging) {
internal.Logger = logger
}
//------------------------------------------------------------------------------
type Hook interface {
DialHook(next DialHook) DialHook
ProcessHook(next ProcessHook) ProcessHook
ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook
}
type (
DialHook func(ctx context.Context, network, addr string) (net.Conn, error)
ProcessHook func(ctx context.Context, cmd Cmder) error
ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error
)
type hooksMixin struct {
slice []Hook
initial hooks
current hooks
}
func (hs *hooksMixin) initHooks(hooks hooks) {
hs.initial = hooks
hs.chain()
}
type hooks struct {
dial DialHook
process ProcessHook
pipeline ProcessPipelineHook
txPipeline ProcessPipelineHook
}
func (h *hooks) setDefaults() {
if h.dial == nil {
h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil }
}
if h.process == nil {
h.process = func(ctx context.Context, cmd Cmder) error { return nil }
}
if h.pipeline == nil {
h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
}
if h.txPipeline == nil {
h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
}
}
// AddHook appends a hook to the hook chain.
// Hooks run during connection dialing, command execution, and pipelining,
// in first-in-first-out (FIFO) order.
// Each hook must call the next hook, unless it wants to terminate the execution of the command.
// For example, you added hook-1, hook-2:
//
// client.AddHook(hook-1, hook-2)
//
// hook-1:
//
// func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
// return func(ctx context.Context, cmd Cmder) error {
// print("hook-1 start")
// next(ctx, cmd)
// print("hook-1 end")
// return nil
// }
// }
//
// hook-2:
//
// func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
// return func(ctx context.Context, cmd redis.Cmder) error {
// print("hook-2 start")
// next(ctx, cmd)
// print("hook-2 end")
// return nil
// }
// }
//
// The execution sequence is:
//
// hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end
//
// Please note: "next(ctx, cmd)" is very important, it will call the next hook,
// if "next(ctx, cmd)" is not executed, the redis command will not be executed.
func (hs *hooksMixin) AddHook(hook Hook) {
hs.slice = append(hs.slice, hook)
hs.chain()
}
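A compilable variant of the hook sketched in the doc comment above, assuming go-redis v9; the timing logic is illustrative:

package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/redis/go-redis/v9"
)

type timingHook struct{}

func (timingHook) DialHook(next redis.DialHook) redis.DialHook {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		return next(ctx, network, addr) // pass through unchanged
	}
}

func (timingHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
	return func(ctx context.Context, cmd redis.Cmder) error {
		start := time.Now()
		err := next(ctx, cmd) // without this call the command never runs
		log.Printf("%s took %s", cmd.Name(), time.Since(start))
		return err
	}
}

func (timingHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
	return func(ctx context.Context, cmds []redis.Cmder) error {
		return next(ctx, cmds)
	}
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()
	rdb.AddHook(timingHook{})
	_ = rdb.Ping(context.Background())
}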
func (hs *hooksMixin) chain() {
hs.initial.setDefaults()
hs.current.dial = hs.initial.dial
hs.current.process = hs.initial.process
hs.current.pipeline = hs.initial.pipeline
hs.current.txPipeline = hs.initial.txPipeline
for i := len(hs.slice) - 1; i >= 0; i-- {
if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil {
hs.current.dial = wrapped
}
if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil {
hs.current.process = wrapped
}
if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil {
hs.current.pipeline = wrapped
}
if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil {
hs.current.txPipeline = wrapped
}
}
}
func (hs *hooksMixin) clone() hooksMixin {
clone := *hs
l := len(clone.slice)
clone.slice = clone.slice[:l:l]
return clone
}
func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error {
for i := len(hs.slice) - 1; i >= 0; i-- {
if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil {
hook = wrapped
}
}
return hook(ctx, cmd)
}
func (hs *hooksMixin) withProcessPipelineHook(
ctx context.Context, cmds []Cmder, hook ProcessPipelineHook,
) error {
for i := len(hs.slice) - 1; i >= 0; i-- {
if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil {
hook = wrapped
}
}
return hook(ctx, cmds)
}
func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) {
return hs.current.dial(ctx, network, addr)
}
func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error {
return hs.current.process(ctx, cmd)
}
func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error {
return hs.current.pipeline(ctx, cmds)
}
func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error {
return hs.current.txPipeline(ctx, cmds)
}
//------------------------------------------------------------------------------
type baseClient struct {
opt *Options
connPool pool.Pooler
onClose func() error // hook called when client is closed
}
func (c *baseClient) clone() *baseClient {
clone := *c
return &clone
}
func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
opt := c.opt.clone()
opt.ReadTimeout = timeout
opt.WriteTimeout = timeout
clone := c.clone()
clone.opt = opt
return clone
}
func (c *baseClient) String() string {
return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
}
func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
cn, err := c.connPool.NewConn(ctx)
if err != nil {
return nil, err
}
err = c.initConn(ctx, cn)
if err != nil {
_ = c.connPool.CloseConn(cn)
return nil, err
}
return cn, nil
}
func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
if c.opt.Limiter != nil {
err := c.opt.Limiter.Allow()
if err != nil {
return nil, err
}
}
cn, err := c._getConn(ctx)
if err != nil {
if c.opt.Limiter != nil {
c.opt.Limiter.ReportResult(err)
}
return nil, err
}
return cn, nil
}
func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
cn, err := c.connPool.Get(ctx)
if err != nil {
return nil, err
}
if cn.Inited {
return cn, nil
}
if err := c.initConn(ctx, cn); err != nil {
c.connPool.Remove(ctx, cn, err)
if err := errors.Unwrap(err); err != nil {
return nil, err
}
return nil, err
}
return cn, nil
}
func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
if cn.Inited {
return nil
}
cn.Inited = true
username, password := c.opt.Username, c.opt.Password
if c.opt.CredentialsProvider != nil {
username, password = c.opt.CredentialsProvider()
}
connPool := pool.NewSingleConnPool(c.connPool, cn)
conn := newConn(c.opt, connPool)
var auth bool
protocol := c.opt.Protocol
// By default, use RESP3 in the current version.
if protocol < 2 {
protocol = 3
}
// for redis-server versions that do not support the HELLO command,
// RESP2 will continue to be used.
if err := conn.Hello(ctx, protocol, username, password, "").Err(); err == nil {
auth = true
} else if !isRedisError(err) {
// When the server responds with the RESP protocol and the result is not a normal
// execution result of the HELLO command, we consider it to be an indication that
// the server does not support the HELLO command.
// The server may be a redis-server that does not support the HELLO command,
// or it could be DragonflyDB or a third-party redis-proxy. They all respond
// with different error string results for unsupported commands, making it
// difficult to rely on error strings to determine all results.
return err
}
_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
if !auth && password != "" {
if username != "" {
pipe.AuthACL(ctx, username, password)
} else {
pipe.Auth(ctx, password)
}
}
if c.opt.DB > 0 {
pipe.Select(ctx, c.opt.DB)
}
if c.opt.readOnly {
pipe.ReadOnly(ctx)
}
if c.opt.ClientName != "" {
pipe.ClientSetName(ctx, c.opt.ClientName)
}
return nil
})
if err != nil {
return err
}
if c.opt.OnConnect != nil {
return c.opt.OnConnect(ctx, conn)
}
return nil
}
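Callers that want to stay on RESP2 can pin it via the new Protocol option (the client still sends HELLO with protocol 2 and, as initConn above shows, falls back when the server rejects the command); a minimal sketch:

package main

import "github.com/redis/go-redis/v9"

func main() {
	// Leaving Protocol unset (or 3) negotiates RESP3.
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Protocol: 2, // pin RESP2, skipping RESP3 features
	})
	defer rdb.Close()
}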
func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
if c.opt.Limiter != nil {
c.opt.Limiter.ReportResult(err)
}
if isBadConn(err, false, c.opt.Addr) {
c.connPool.Remove(ctx, cn, err)
} else {
c.connPool.Put(ctx, cn)
}
}
func (c *baseClient) withConn(
ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
cn, err := c.getConn(ctx)
if err != nil {
return err
}
var fnErr error
defer func() {
c.releaseConn(ctx, cn, fnErr)
}()
fnErr = fn(ctx, cn)
return fnErr
}
func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) {
return c.opt.Dialer(ctx, network, addr)
}
func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
var lastErr error
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
attempt := attempt
retry, err := c._process(ctx, cmd, attempt)
if err == nil || !retry {
return err
}
lastErr = err
}
return lastErr
}
func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
if attempt > 0 {
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
return false, err
}
}
retryTimeout := uint32(0)
if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
}); err != nil {
atomic.StoreUint32(&retryTimeout, 1)
return err
}
if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil {
if cmd.readTimeout() == nil {
atomic.StoreUint32(&retryTimeout, 1)
} else {
atomic.StoreUint32(&retryTimeout, 0)
}
return err
}
return nil
}); err != nil {
retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
return retry, err
}
return false, nil
}
func (c *baseClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
if timeout := cmd.readTimeout(); timeout != nil {
t := *timeout
if t == 0 {
return 0
}
return t + 10*time.Second
}
return c.opt.ReadTimeout
}
// Close closes the client, releasing any open resources.
//
// It is rare to Close a Client, as the Client is meant to be
// long-lived and shared between many goroutines.
func (c *baseClient) Close() error {
var firstErr error
if c.onClose != nil {
if err := c.onClose(); err != nil {
firstErr = err
}
}
if err := c.connPool.Close(); err != nil && firstErr == nil {
firstErr = err
}
return firstErr
}
func (c *baseClient) getAddr() string {
return c.opt.Addr
}
func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil {
return err
}
return cmdsFirstErr(cmds)
}
func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil {
return err
}
return cmdsFirstErr(cmds)
}
type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
func (c *baseClient) generalProcessPipeline(
ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
var lastErr error
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
setCmdsErr(cmds, err)
return err
}
}
// Enable retries by default to retry dial errors returned by withConn.
canRetry := true
lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
var err error
canRetry, err = p(ctx, cn, cmds)
return err
})
if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
return lastErr
}
}
return lastErr
}
func (c *baseClient) pipelineProcessCmds(
ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
}); err != nil {
setCmdsErr(cmds, err)
return true, err
}
if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
return pipelineReadCmds(rd, cmds)
}); err != nil {
return true, err
}
return false, nil
}
func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
for i, cmd := range cmds {
err := cmd.readReply(rd)
cmd.SetErr(err)
if err != nil && !isRedisError(err) {
setCmdsErr(cmds[i+1:], err)
return err
}
}
// Retry errors like "LOADING redis is loading the dataset in memory".
return cmds[0].Err()
}
func (c *baseClient) txPipelineProcessCmds(
ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmds(wr, cmds)
}); err != nil {
setCmdsErr(cmds, err)
return true, err
}
if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
statusCmd := cmds[0].(*StatusCmd)
// Trim multi and exec.
trimmedCmds := cmds[1 : len(cmds)-1]
if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil {
setCmdsErr(cmds, err)
return err
}
return pipelineReadCmds(rd, trimmedCmds)
}); err != nil {
return false, err
}
return false, nil
}
func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
// Parse +OK.
if err := statusCmd.readReply(rd); err != nil {
return err
}
// Parse +QUEUED.
for range cmds {
if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
return err
}
}
// Parse number of replies.
line, err := rd.ReadLine()
if err != nil {
if err == Nil {
err = TxFailedErr
}
return err
}
if line[0] != proto.RespArray {
return fmt.Errorf("redis: expected '*', but got line %q", line)
}
return nil
}
func (c *baseClient) context(ctx context.Context) context.Context {
if c.opt.ContextTimeoutEnabled {
return ctx
}
return context.Background()
}
//------------------------------------------------------------------------------
// Client is a Redis client representing a pool of zero or more underlying connections.
// It's safe for concurrent use by multiple goroutines.
//
// Client creates and frees connections automatically; it also maintains a free pool
// of idle connections. You can control the pool size with the Options.PoolSize option.
type Client struct {
*baseClient
cmdable
hooksMixin
}
// NewClient returns a client to the Redis Server specified by Options.
func NewClient(opt *Options) *Client {
opt.init()
c := Client{
baseClient: &baseClient{
opt: opt,
},
}
c.init()
c.connPool = newConnPool(opt, c.dialHook)
return &c
}
func (c *Client) init() {
c.cmdable = c.Process
c.initHooks(hooks{
dial: c.baseClient.dial,
process: c.baseClient.process,
pipeline: c.baseClient.processPipeline,
txPipeline: c.baseClient.processTxPipeline,
})
}
func (c *Client) WithTimeout(timeout time.Duration) *Client {
clone := *c
clone.baseClient = c.baseClient.withTimeout(timeout)
clone.init()
return &clone
}
func (c *Client) Conn() *Conn {
return newConn(c.opt, pool.NewStickyConnPool(c.connPool))
}
// Do creates a Cmd from the args and processes the cmd.
func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
return cmd
}
func (c *Client) Process(ctx context.Context, cmd Cmder) error {
err := c.processHook(ctx, cmd)
cmd.SetErr(err)
return err
}
// Options returns read-only Options that were used to create the client.
func (c *Client) Options() *Options {
return c.opt
}
type PoolStats pool.Stats
// PoolStats returns connection pool stats.
func (c *Client) PoolStats() *PoolStats {
stats := c.connPool.Stats()
return (*PoolStats)(stats)
}
func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Client) Pipeline() Pipeliner {
pipe := Pipeline{
exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
}
func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Client) TxPipeline() Pipeliner {
pipe := Pipeline{
exec: func(ctx context.Context, cmds []Cmder) error {
cmds = wrapMultiExec(ctx, cmds)
return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}
func (c *Client) pubSub() *PubSub {
pubsub := &PubSub{
opt: c.opt,
newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
return c.newConn(ctx)
},
closeConn: c.connPool.CloseConn,
}
pubsub.init()
return pubsub
}
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create an empty subscription.
// Note that this method does not wait on a response from Redis, so the
// subscription may not be active immediately. To force the connection to wait,
// you may call the Receive() method on the returned *PubSub like so:
//
// sub := client.Subscribe(queryResp)
// iface, err := sub.Receive()
// if err != nil {
// // handle error
// }
//
// // Should be *Subscription, but others are possible if other actions have been
// // taken on sub since it was created.
// switch iface.(type) {
// case *Subscription:
// // subscribe succeeded
// case *Message:
// // received first message
// case *Pong:
// // pong received
// default:
// // handle error
// }
//
// ch := sub.Channel()
func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.Subscribe(ctx, channels...)
}
return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create an empty subscription.
func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.PSubscribe(ctx, channels...)
}
return pubsub
}
// SSubscribe subscribes the client to the specified shard channels.
// Channels can be omitted to create an empty subscription.
func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.SSubscribe(ctx, channels...)
}
return pubsub
}
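A shard pub/sub sketch pairing the new SSubscribe with SPublish (requires Redis 7+; the channel name is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	sub := rdb.SSubscribe(ctx, "shard:events")
	defer sub.Close()

	// Wait for the subscription confirmation before publishing.
	if _, err := sub.Receive(ctx); err != nil {
		panic(err)
	}
	if err := rdb.SPublish(ctx, "shard:events", "hello").Err(); err != nil {
		panic(err)
	}

	msg, err := sub.ReceiveMessage(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Channel, msg.Payload)
}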
//------------------------------------------------------------------------------
// Conn represents a single Redis connection rather than a pool of connections.
// Prefer running commands from Client unless there is a specific need
// for a continuous single Redis connection.
type Conn struct {
baseClient
cmdable
statefulCmdable
hooksMixin
}
func newConn(opt *Options, connPool pool.Pooler) *Conn {
c := Conn{
baseClient: baseClient{
opt: opt,
connPool: connPool,
},
}
c.cmdable = c.Process
c.statefulCmdable = c.Process
c.initHooks(hooks{
dial: c.baseClient.dial,
process: c.baseClient.process,
pipeline: c.baseClient.processPipeline,
txPipeline: c.baseClient.processTxPipeline,
})
return &c
}
func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
err := c.processHook(ctx, cmd)
cmd.SetErr(err)
return err
}
func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Conn) Pipeline() Pipeliner {
pipe := Pipeline{
exec: c.processPipelineHook,
}
pipe.init()
return &pipe
}
func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Conn) TxPipeline() Pipeliner {
pipe := Pipeline{
exec: func(ctx context.Context, cmds []Cmder) error {
cmds = wrapMultiExec(ctx, cmds)
return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}

161
vendor/github.com/redis/go-redis/v9/redis_gears.go generated vendored Normal file
View File

@ -0,0 +1,161 @@
package redis
import (
"context"
"fmt"
"strings"
)
type gearsCmdable interface {
TFunctionLoad(ctx context.Context, lib string) *StatusCmd
TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd
TFunctionDelete(ctx context.Context, libName string) *StatusCmd
TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd
TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd
TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd
TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd
TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd
TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd
}
type TFunctionLoadOptions struct {
Replace bool
Config string
}
type TFunctionListOptions struct {
Withcode bool
Verbose int
Library string
}
type TFCallOptions struct {
Keys []string
Arguments []string
}
// TFunctionLoad - load a new JavaScript library into Redis.
// For more information - https://redis.io/commands/tfunction-load/
func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd {
args := []interface{}{"TFUNCTION", "LOAD", lib}
cmd := NewStatusCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd {
args := []interface{}{"TFUNCTION", "LOAD"}
if options != nil {
if options.Replace {
args = append(args, "REPLACE")
}
if options.Config != "" {
args = append(args, "CONFIG", options.Config)
}
}
args = append(args, lib)
cmd := NewStatusCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
// TFunctionDelete - delete a JavaScript library from Redis.
// For more information - https://redis.io/commands/tfunction-delete/
func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd {
args := []interface{}{"TFUNCTION", "DELETE", libName}
cmd := NewStatusCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
// TFunctionList - list the functions with additional information about each function.
// For more information - https://redis.io/commands/tfunction-list/
func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd {
args := []interface{}{"TFUNCTION", "LIST"}
cmd := NewMapStringInterfaceSliceCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd {
args := []interface{}{"TFUNCTION", "LIST"}
if options != nil {
if options.Withcode {
args = append(args, "WITHCODE")
}
if options.Verbose != 0 {
v := strings.Repeat("v", options.Verbose)
args = append(args, v)
}
if options.Library != "" {
args = append(args, "LIBRARY", options.Library)
}
}
cmd := NewMapStringInterfaceSliceCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
// TFCall - invoke a function.
// For more information - https://redis.io/commands/tfcall/
func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
lf := libName + "." + funcName
args := []interface{}{"TFCALL", lf, numKeys}
cmd := NewCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
lf := libName + "." + funcName
args := []interface{}{"TFCALL", lf, numKeys}
if options != nil {
if options.Keys != nil {
for _, key := range options.Keys {
args = append(args, key)
}
}
if options.Arguments != nil {
for _, key := range options.Arguments {
args = append(args, key)
}
}
}
cmd := NewCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine).
// For more information - https://redis.io/commands/TFCallASYNC/
func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
lf := fmt.Sprintf("%s.%s", libName, funcName)
args := []interface{}{"TFCALLASYNC", lf, numKeys}
cmd := NewCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
lf := fmt.Sprintf("%s.%s", libName, funcName)
args := []interface{}{"TFCALLASYNC", lf, numKeys}
if options != nil {
if options.Keys != nil {
for _, key := range options.Keys {
args = append(args, key)
}
}
if options.Arguments != nil {
for _, key := range options.Arguments {
args = append(args, key)
}
}
}
cmd := NewCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
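An illustrative Triggers and Functions sketch for the commands above; it assumes a gears-enabled Redis server, and the JavaScript library source is a placeholder:

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	lib := "#!js api_version=1.0 name=mylib\nredis.registerFunction('hello', () => 'world');"

	if err := rdb.TFunctionLoadArgs(ctx, lib, &redis.TFunctionLoadOptions{Replace: true}).Err(); err != nil {
		panic(err)
	}
	res, err := rdb.TFCall(ctx, "mylib", "hello", 0).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(res)
}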

View File

@ -82,17 +82,17 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
return &cmd
}
// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
var cmd StringStringMapCmd
// NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing.
func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd {
var cmd MapStringStringCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
}
// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
var cmd StringIntMapCmd
// NewMapStringIntCmdResult returns a MapStringIntCmd initialised with val and err for testing.
func NewMapStringIntCmdResult(val map[string]int64, err error) *MapStringIntCmd {
var cmd MapStringIntCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
@ -114,7 +114,7 @@ func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
return &cmd
}
// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
var cmd ZWithKeyCmd
cmd.val = val
@ -178,3 +178,11 @@ func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
cmd.SetErr(err)
return &cmd
}
// NewXPendingResult returns an XPendingCmd initialised with val and err for testing.
func NewXPendingResult(val *XPending, err error) *XPendingCmd {
var cmd XPendingCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
}
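A sketch of one of the renamed helpers in a unit test; the fake value is illustrative:

package redis_test

import (
	"testing"

	"github.com/redis/go-redis/v9"
)

func TestFakeConfigGet(t *testing.T) {
	cmd := redis.NewMapStringStringResult(map[string]string{"maxmemory": "0"}, nil)
	if got := cmd.Val()["maxmemory"]; got != "0" {
		t.Fatalf("maxmemory = %q, want %q", got, "0")
	}
}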

View File

@ -12,12 +12,12 @@ import (
"time"
"github.com/cespare/xxhash/v2"
rendezvous "github.com/dgryski/go-rendezvous" //nolint
"github.com/dgryski/go-rendezvous" //nolint
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/hashtag"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/rand"
"github.com/redis/go-redis/v9/internal"
"github.com/redis/go-redis/v9/internal/hashtag"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/rand"
)
var errRingShardsDown = errors.New("redis: all ring shards are down")
@ -48,8 +48,11 @@ type RingOptions struct {
// Map of name => host:port addresses of ring shards.
Addrs map[string]string
// NewClient creates a shard client with provided name and options.
NewClient func(name string, opt *Options) *Client
// NewClient creates a shard client with provided options.
NewClient func(opt *Options) *Client
// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName string
// Frequency of PING commands sent to check shards availability.
// Shard is considered down after 3 consecutive failed checks.
@ -67,6 +70,7 @@ type RingOptions struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
Protocol int
Username string
Password string
DB int
@ -82,12 +86,12 @@ type RingOptions struct {
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
PoolSize int
PoolTimeout time.Duration
MinIdleConns int
MaxIdleConns int
ConnMaxIdleTime time.Duration
ConnMaxLifetime time.Duration
TLSConfig *tls.Config
Limiter Limiter
@ -95,7 +99,7 @@ type RingOptions struct {
func (opt *RingOptions) init() {
if opt.NewClient == nil {
opt.NewClient = func(name string, opt *Options) *Client {
opt.NewClient = func(opt *Options) *Client {
return NewClient(opt)
}
}
@ -129,9 +133,11 @@ func (opt *RingOptions) init() {
func (opt *RingOptions) clientOptions() *Options {
return &Options{
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
@ -142,13 +148,13 @@ func (opt *RingOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
MinIdleConns: opt.MinIdleConns,
MaxIdleConns: opt.MaxIdleConns,
ConnMaxIdleTime: opt.ConnMaxIdleTime,
ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
Limiter: opt.Limiter,
@ -160,14 +166,16 @@ func (opt *RingOptions) clientOptions() *Options {
type ringShard struct {
Client *Client
down int32
addr string
}
func newRingShard(opt *RingOptions, name, addr string) *ringShard {
func newRingShard(opt *RingOptions, addr string) *ringShard {
clopt := opt.clientOptions()
clopt.Addr = addr
return &ringShard{
Client: opt.NewClient(name, clopt),
Client: opt.NewClient(clopt),
addr: addr,
}
}
@ -208,161 +216,238 @@ func (shard *ringShard) Vote(up bool) bool {
//------------------------------------------------------------------------------
type ringShards struct {
type ringSharding struct {
opt *RingOptions
mu sync.RWMutex
hash ConsistentHash
shards map[string]*ringShard // read only
list []*ringShard // read only
numShard int
closed bool
mu sync.RWMutex
shards *ringShards
closed bool
hash ConsistentHash
numShard int
onNewNode []func(rdb *Client)
// ensures exclusive access to SetAddrs so there is no need
// to hold mu for the duration of potentially long shard creation
setAddrsMu sync.Mutex
}
func newRingShards(opt *RingOptions) *ringShards {
shards := make(map[string]*ringShard, len(opt.Addrs))
list := make([]*ringShard, 0, len(shards))
type ringShards struct {
m map[string]*ringShard
list []*ringShard
}
for name, addr := range opt.Addrs {
shard := newRingShard(opt, name, addr)
shards[name] = shard
list = append(list, shard)
}
c := &ringShards{
func newRingSharding(opt *RingOptions) *ringSharding {
c := &ringSharding{
opt: opt,
shards: shards,
list: list,
}
c.rebalance()
c.SetAddrs(opt.Addrs)
return c
}
func (c *ringShards) List() []*ringShard {
func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
c.mu.Lock()
c.onNewNode = append(c.onNewNode, fn)
c.mu.Unlock()
}
// SetAddrs replaces the shards in use, allowing the number of shards to be
// increased or decreased. It reuses shards that existed before and closes
// the ones that are no longer used.
func (c *ringSharding) SetAddrs(addrs map[string]string) {
c.setAddrsMu.Lock()
defer c.setAddrsMu.Unlock()
cleanup := func(shards map[string]*ringShard) {
for addr, shard := range shards {
if err := shard.Client.Close(); err != nil {
internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
}
}
}
c.mu.RLock()
if c.closed {
c.mu.RUnlock()
return
}
existing := c.shards
c.mu.RUnlock()
shards, created, unused := c.newRingShards(addrs, existing)
c.mu.Lock()
if c.closed {
cleanup(created)
c.mu.Unlock()
return
}
c.shards = shards
c.rebalanceLocked()
c.mu.Unlock()
cleanup(unused)
}
func (c *ringSharding) newRingShards(
addrs map[string]string, existing *ringShards,
) (shards *ringShards, created, unused map[string]*ringShard) {
shards = &ringShards{m: make(map[string]*ringShard, len(addrs))}
created = make(map[string]*ringShard) // indexed by addr
unused = make(map[string]*ringShard) // indexed by addr
if existing != nil {
for _, shard := range existing.list {
unused[shard.addr] = shard
}
}
for name, addr := range addrs {
if shard, ok := unused[addr]; ok {
shards.m[name] = shard
delete(unused, addr)
} else {
shard := newRingShard(c.opt, addr)
shards.m[name] = shard
created[addr] = shard
for _, fn := range c.onNewNode {
fn(shard.Client)
}
}
}
for _, shard := range shards.m {
shards.list = append(shards.list, shard)
}
return
}
func (c *ringSharding) List() []*ringShard {
var list []*ringShard
c.mu.RLock()
if !c.closed {
list = c.list
list = c.shards.list
}
c.mu.RUnlock()
return list
}
func (c *ringShards) Hash(key string) string {
func (c *ringSharding) Hash(key string) string {
key = hashtag.Key(key)
var hash string
c.mu.RLock()
defer c.mu.RUnlock()
if c.numShard > 0 {
hash = c.hash.Get(key)
}
c.mu.RUnlock()
return hash
}
func (c *ringShards) GetByKey(key string) (*ringShard, error) {
func (c *ringSharding) GetByKey(key string) (*ringShard, error) {
key = hashtag.Key(key)
c.mu.RLock()
defer c.mu.RUnlock()
if c.closed {
c.mu.RUnlock()
return nil, pool.ErrClosed
}
if c.numShard == 0 {
c.mu.RUnlock()
return nil, errRingShardsDown
}
hash := c.hash.Get(key)
if hash == "" {
c.mu.RUnlock()
shardName := c.hash.Get(key)
if shardName == "" {
return nil, errRingShardsDown
}
shard := c.shards[hash]
c.mu.RUnlock()
return shard, nil
return c.shards.m[shardName], nil
}
func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
func (c *ringSharding) GetByName(shardName string) (*ringShard, error) {
if shardName == "" {
return c.Random()
}
c.mu.RLock()
shard := c.shards[shardName]
c.mu.RUnlock()
return shard, nil
defer c.mu.RUnlock()
return c.shards.m[shardName], nil
}
func (c *ringShards) Random() (*ringShard, error) {
func (c *ringSharding) Random() (*ringShard, error) {
return c.GetByKey(strconv.Itoa(rand.Int()))
}
// heartbeat monitors state of each shard in the ring.
func (c *ringShards) Heartbeat(frequency time.Duration) {
// Heartbeat monitors the state of each shard in the ring.
func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
ctx := context.Background()
for range ticker.C {
var rebalance bool
for {
select {
case <-ticker.C:
var rebalance bool
for _, shard := range c.List() {
err := shard.Client.Ping(ctx).Err()
isUp := err == nil || err == pool.ErrPoolTimeout
if shard.Vote(isUp) {
internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
rebalance = true
for _, shard := range c.List() {
err := shard.Client.Ping(ctx).Err()
isUp := err == nil || err == pool.ErrPoolTimeout
if shard.Vote(isUp) {
internal.Logger.Printf(ctx, "ring shard state changed: %s", shard)
rebalance = true
}
}
}
if rebalance {
c.rebalance()
if rebalance {
c.mu.Lock()
c.rebalanceLocked()
c.mu.Unlock()
}
case <-ctx.Done():
return
}
}
}
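Note that the loop now selects on ctx.Done(), so cancelling the heartbeat context (which Ring.Close does via heartbeatCancelFn, below) terminates the goroutine. A sketch of the lifecycle, with an illustrative address:

ring := redis.NewRing(&redis.RingOptions{Addrs: map[string]string{"a": ":6379"}})
// ... use ring ...
_ = ring.Close() // cancels hbCtx; Heartbeat hits <-ctx.Done() and returns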
// rebalance removes dead shards from the Ring.
func (c *ringShards) rebalance() {
c.mu.RLock()
shards := c.shards
c.mu.RUnlock()
// rebalanceLocked removes dead shards from the Ring.
// Requires c.mu locked.
func (c *ringSharding) rebalanceLocked() {
if c.closed {
return
}
if c.shards == nil {
return
}
liveShards := make([]string, 0, len(shards))
liveShards := make([]string, 0, len(c.shards.m))
for name, shard := range shards {
for name, shard := range c.shards.m {
if shard.IsUp() {
liveShards = append(liveShards, name)
}
}
hash := c.opt.NewConsistentHash(liveShards)
c.mu.Lock()
c.hash = hash
c.hash = c.opt.NewConsistentHash(liveShards)
c.numShard = len(liveShards)
c.mu.Unlock()
}
func (c *ringShards) Len() int {
func (c *ringSharding) Len() int {
c.mu.RLock()
l := c.numShard
c.mu.RUnlock()
return l
defer c.mu.RUnlock()
return c.numShard
}
func (c *ringShards) Close() error {
func (c *ringSharding) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
@ -372,26 +457,22 @@ func (c *ringShards) Close() error {
c.closed = true
var firstErr error
for _, shard := range c.shards {
for _, shard := range c.shards.list {
if err := shard.Client.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
c.hash = nil
c.shards = nil
c.list = nil
c.numShard = 0
return firstErr
}
//------------------------------------------------------------------------------
type ring struct {
opt *RingOptions
shards *ringShards
cmdsInfoCache *cmdsInfoCache //nolint:structcheck
}
// Ring is a Redis client that uses consistent hashing to distribute
// keys across multiple Redis servers (shards). It's safe for
// concurrent use by multiple goroutines.
@ -407,47 +488,49 @@ type ring struct {
// and can tolerate losing data when one of the servers dies.
// Otherwise you should use Redis Cluster.
type Ring struct {
*ring
cmdable
hooks
ctx context.Context
hooksMixin
opt *RingOptions
sharding *ringSharding
cmdsInfoCache *cmdsInfoCache
heartbeatCancelFn context.CancelFunc
}
func NewRing(opt *RingOptions) *Ring {
opt.init()
hbCtx, hbCancel := context.WithCancel(context.Background())
ring := Ring{
ring: &ring{
opt: opt,
shards: newRingShards(opt),
},
ctx: context.Background(),
opt: opt,
sharding: newRingSharding(opt),
heartbeatCancelFn: hbCancel,
}
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
ring.cmdable = ring.Process
go ring.shards.Heartbeat(opt.HeartbeatFrequency)
ring.initHooks(hooks{
process: ring.process,
pipeline: func(ctx context.Context, cmds []Cmder) error {
return ring.generalProcessPipeline(ctx, cmds, false)
},
txPipeline: func(ctx context.Context, cmds []Cmder) error {
return ring.generalProcessPipeline(ctx, cmds, true)
},
})
go ring.sharding.Heartbeat(hbCtx, opt.HeartbeatFrequency)
return &ring
}
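The hooks field is replaced by hooksMixin, and v9 hooks are wrappers around the next handler rather than v8's before/after callbacks. A sketch of a v9-style logging hook (name is illustrative; assumes the usual imports: context, log, and redis/v9):

type logHook struct{}

func (logHook) DialHook(next redis.DialHook) redis.DialHook { return next }

func (logHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
	return func(ctx context.Context, cmd redis.Cmder) error {
		err := next(ctx, cmd)
		log.Printf("redis: %s err=%v", cmd.Name(), err)
		return err
	}
}

func (logHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
	return next
}

// usage: ring.AddHook(logHook{})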
func (c *Ring) Context() context.Context {
return c.ctx
func (c *Ring) SetAddrs(addrs map[string]string) {
c.sharding.SetAddrs(addrs)
}
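This new method is the reason for the upgrade (see the commit message). A minimal usage sketch, with illustrative shard addresses:

ring := redis.NewRing(&redis.RingOptions{
	Addrs: map[string]string{"shard1": "127.0.0.1:6379"}, // hypothetical
})
// Resize the ring in place: shards whose address is unchanged are reused,
// removed ones are closed, new ones are dialed.
ring.SetAddrs(map[string]string{
	"shard1": "127.0.0.1:6379",
	"shard2": "127.0.0.1:6380",
})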
func (c *Ring) WithContext(ctx context.Context) *Ring {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.cmdable = clone.Process
clone.hooks.lock()
clone.ctx = ctx
return &clone
}
// Do creates a Cmd from the args and processes the cmd.
// Do creates a Cmd from the args and processes the cmd.
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@ -455,7 +538,9 @@ func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.process)
err := c.processHook(ctx, cmd)
cmd.SetErr(err)
return err
}
// Options returns read-only Options that were used to create the client.
@ -469,7 +554,7 @@ func (c *Ring) retryBackoff(attempt int) time.Duration {
// PoolStats returns accumulated connection pool stats.
func (c *Ring) PoolStats() *PoolStats {
shards := c.shards.List()
shards := c.sharding.List()
var acc PoolStats
for _, shard := range shards {
s := shard.Client.connPool.Stats()
@ -484,7 +569,7 @@ func (c *Ring) PoolStats() *PoolStats {
// Len returns the current number of shards in the ring.
func (c *Ring) Len() int {
return c.shards.Len()
return c.sharding.Len()
}
// Subscribe subscribes the client to the specified channels.
@ -493,7 +578,7 @@ func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
panic("at least one channel is required")
}
shard, err := c.shards.GetByKey(channels[0])
shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
@ -507,7 +592,7 @@ func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
panic("at least one channel is required")
}
shard, err := c.shards.GetByKey(channels[0])
shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
@ -515,13 +600,30 @@ func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
return shard.Client.PSubscribe(ctx, channels...)
}
// SSubscribe subscribes the client to the specified shard channels.
func (c *Ring) SSubscribe(ctx context.Context, channels ...string) *PubSub {
if len(channels) == 0 {
panic("at least one channel is required")
}
shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
}
return shard.Client.SSubscribe(ctx, channels...)
}
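SSubscribe is new in v9 and targets sharded Pub/Sub (Redis 7's SSUBSCRIBE). A brief sketch, assuming ctx and a ring as above (channel name illustrative):

pubsub := ring.SSubscribe(ctx, "events") // routed to the shard owning "events"
defer pubsub.Close()
msg, err := pubsub.ReceiveMessage(ctx)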
func (c *Ring) OnNewNode(fn func(rdb *Client)) {
c.sharding.OnNewNode(fn)
}
// ForEachShard concurrently calls the fn on each live shard in the ring.
// It returns the first error if any.
func (c *Ring) ForEachShard(
ctx context.Context,
fn func(ctx context.Context, client *Client) error,
) error {
shards := c.shards.List()
shards := c.sharding.List()
var wg sync.WaitGroup
errCh := make(chan error, 1)
for _, shard := range shards {
@ -552,7 +654,7 @@ func (c *Ring) ForEachShard(
}
func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
shards := c.shards.List()
shards := c.sharding.List()
var firstErr error
for _, shard := range shards {
cmdsInfo, err := shard.Client.Command(ctx).Result()
@ -585,10 +687,10 @@ func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
cmdInfo := c.cmdInfo(ctx, cmd.Name())
pos := cmdFirstKeyPos(cmd, cmdInfo)
if pos == 0 {
return c.shards.Random()
return c.sharding.Random()
}
firstKey := cmd.stringArg(pos)
return c.shards.GetByKey(firstKey)
return c.sharding.GetByKey(firstKey)
}
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
@ -619,47 +721,42 @@ func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
func (c *Ring) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
}
func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, false)
})
}
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
func (c *Ring) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
exec: func(ctx context.Context, cmds []Cmder) error {
cmds = wrapMultiExec(ctx, cmds)
return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}
func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, true)
})
}
func (c *Ring) generalProcessPipeline(
ctx context.Context, cmds []Cmder, tx bool,
) error {
if tx {
// Trim multi .. exec.
cmds = cmds[1 : len(cmds)-1]
}
cmdsMap := make(map[string][]Cmder)
for _, cmd := range cmds {
cmdInfo := c.cmdInfo(ctx, cmd.Name())
hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
if hash != "" {
hash = c.shards.Hash(hash)
hash = c.sharding.Hash(hash)
}
cmdsMap[hash] = append(cmdsMap[hash], cmd)
}
@ -670,7 +767,19 @@ func (c *Ring) generalProcessPipeline(
go func(hash string, cmds []Cmder) {
defer wg.Done()
_ = c.processShardPipeline(ctx, hash, cmds, tx)
// TODO: retry?
shard, err := c.sharding.GetByName(hash)
if err != nil {
setCmdsErr(cmds, err)
return
}
if tx {
cmds = wrapMultiExec(ctx, cmds)
_ = shard.Client.processTxPipelineHook(ctx, cmds)
} else {
_ = shard.Client.processPipelineHook(ctx, cmds)
}
}(hash, cmds)
}
@ -678,31 +787,16 @@ func (c *Ring) generalProcessPipeline(
return cmdsFirstErr(cmds)
}
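Commands are grouped by the ring hash of their first key and each group is sent to its shard concurrently, so keys sharing a hash tag stay on one shard. A sketch (key names illustrative):

pipe := ring.Pipeline()
pipe.Set(ctx, "{user:1}.name", "alice", 0) // same hash tag...
pipe.Set(ctx, "{user:1}.age", "30", 0)     // ...so same shard, one pipeline
_, err := pipe.Exec(ctx)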
func (c *Ring) processShardPipeline(
ctx context.Context, hash string, cmds []Cmder, tx bool,
) error {
// TODO: retry?
shard, err := c.shards.GetByName(hash)
if err != nil {
setCmdsErr(cmds, err)
return err
}
if tx {
return shard.Client.processTxPipeline(ctx, cmds)
}
return shard.Client.processPipeline(ctx, cmds)
}
func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
if len(keys) == 0 {
return fmt.Errorf("redis: Watch requires at least one key")
}
var shards []*ringShard
for _, key := range keys {
if key != "" {
shard, err := c.shards.GetByKey(hashtag.Key(key))
shard, err := c.sharding.GetByKey(hashtag.Key(key))
if err != nil {
return err
}
@ -732,5 +826,7 @@ func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) er
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
return c.shards.Close()
c.heartbeatCancelFn()
return c.sharding.Close()
}

View File

@ -5,12 +5,13 @@ import (
"crypto/sha1"
"encoding/hex"
"io"
"strings"
)
type Scripter interface {
Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
ScriptLoad(ctx context.Context, script string) *StringCmd
}
@ -50,16 +51,34 @@ func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...in
return c.Eval(ctx, s.src, keys, args...)
}
func (s *Script) EvalRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalRO(ctx, s.src, keys, args...)
}
func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalSha(ctx, s.hash, keys, args...)
}
func (s *Script) EvalShaRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalShaRO(ctx, s.hash, keys, args...)
}
// Run optimistically uses EVALSHA to run the script. If the script does not
// exist, it is retried using EVAL.
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalSha(ctx, c, keys, args...)
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
if HasErrorPrefix(r.Err(), "NOSCRIPT") {
return s.Eval(ctx, c, keys, args...)
}
return r
}
// RunRO optimistically uses EVALSHA_RO to run the script. If the script does
// not exist, it is retried using EVAL_RO.
func (s *Script) RunRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalShaRO(ctx, c, keys, args...)
if HasErrorPrefix(r.Err(), "NOSCRIPT") {
return s.EvalRO(ctx, c, keys, args...)
}
return r
}
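A self-contained sketch of the Run flow (EVALSHA with EVAL fallback on NOSCRIPT); the address and key are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	// The first call falls back to EVAL (which caches the script server-side);
	// subsequent calls succeed via EVALSHA.
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 2).Int()
	fmt.Println(n, err)
}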

View File

@ -9,9 +9,9 @@ import (
"sync"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/rand"
"github.com/redis/go-redis/v9/internal"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/rand"
)
//------------------------------------------------------------------------------
@ -24,6 +24,9 @@ type FailoverOptions struct {
// A seed list of host:port addresses of sentinel nodes.
SentinelAddrs []string
// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName string
// If specified with SentinelPassword, enables ACL-based authentication (via
// AUTH <user> <pass>).
SentinelUsername string
@ -32,25 +35,26 @@ type FailoverOptions struct {
// authentication.
SentinelPassword string
// Allows routing read-only commands to the closest master or slave node.
// Allows routing read-only commands to the closest master or replica node.
// This option only works with NewFailoverClusterClient.
RouteByLatency bool
// Allows routing read-only commands to the random master or slave node.
// Allows routing read-only commands to the random master or replica node.
// This option only works with NewFailoverClusterClient.
RouteRandomly bool
// Route all commands to slave read-only nodes.
SlaveOnly bool
// Route all commands to replica read-only nodes.
ReplicaOnly bool
// Use slaves disconnected with master when cannot get connected slaves
// Now, this option only works in RandomSlaveAddr function.
UseDisconnectedSlaves bool
// Use replicas disconnected from the master when connected replicas cannot be found.
// Currently, this option only works in the RandomReplicaAddr function.
UseDisconnectedReplicas bool
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
Protocol int
Username string
Password string
DB int
@ -59,31 +63,33 @@ type FailoverOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
PoolSize int
PoolTimeout time.Duration
MinIdleConns int
MaxIdleConns int
ConnMaxIdleTime time.Duration
ConnMaxLifetime time.Duration
TLSConfig *tls.Config
}
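With the SlaveOnly → ReplicaOnly rename, a failover client that routes reads to replicas looks roughly like this (addresses and master name illustrative):

rdb := redis.NewFailoverClient(&redis.FailoverOptions{
	MasterName:    "mymaster",
	SentinelAddrs: []string{"127.0.0.1:26379"},
	ReplicaOnly:   true, // v8: SlaveOnly
})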
func (opt *FailoverOptions) clientOptions() *Options {
return &Options{
Addr: "FailoverClient",
Addr: "FailoverClient",
ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
DB: opt.DB,
Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@ -91,17 +97,18 @@ func (opt *FailoverOptions) clientOptions() *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
MinIdleConns: opt.MinIdleConns,
MaxIdleConns: opt.MaxIdleConns,
ConnMaxIdleTime: opt.ConnMaxIdleTime,
ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
}
@ -109,7 +116,8 @@ func (opt *FailoverOptions) clientOptions() *Options {
func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
return &Options{
Addr: addr,
Addr: addr,
ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
@ -126,13 +134,13 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
MinIdleConns: opt.MinIdleConns,
MaxIdleConns: opt.MaxIdleConns,
ConnMaxIdleTime: opt.ConnMaxIdleTime,
ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
}
@ -140,9 +148,12 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
return &ClusterOptions{
ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@ -158,13 +169,13 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
MinIdleConns: opt.MinIdleConns,
MaxIdleConns: opt.MaxIdleConns,
ConnMaxIdleTime: opt.ConnMaxIdleTime,
ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
}
@ -194,10 +205,21 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
}
opt := failoverOpt.clientOptions()
opt.Dialer = masterSlaveDialer(failover)
opt.Dialer = masterReplicaDialer(failover)
opt.init()
connPool := newConnPool(opt)
var connPool *pool.ConnPool
rdb := &Client{
baseClient: &baseClient{
opt: opt,
},
}
rdb.init()
connPool = newConnPool(opt, rdb.dialHook)
rdb.connPool = connPool
rdb.onClose = failover.Close
failover.mu.Lock()
failover.onFailover = func(ctx context.Context, addr string) {
@ -207,25 +229,18 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
}
failover.mu.Unlock()
c := Client{
baseClient: newBaseClient(opt, connPool),
ctx: context.Background(),
}
c.cmdable = c.Process
c.onClose = failover.Close
return &c
return rdb
}
func masterSlaveDialer(
func masterReplicaDialer(
failover *sentinelFailover,
) func(ctx context.Context, network, addr string) (net.Conn, error) {
return func(ctx context.Context, network, _ string) (net.Conn, error) {
var addr string
var err error
if failover.opt.SlaveOnly {
addr, err = failover.RandomSlaveAddr(ctx)
if failover.opt.ReplicaOnly {
addr, err = failover.RandomReplicaAddr(ctx)
} else {
addr, err = failover.MasterAddr(ctx)
if err == nil {
@ -255,37 +270,30 @@ func masterSlaveDialer(
// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
*baseClient
hooks
ctx context.Context
hooksMixin
}
func NewSentinelClient(opt *Options) *SentinelClient {
opt.init()
c := &SentinelClient{
baseClient: &baseClient{
opt: opt,
connPool: newConnPool(opt),
opt: opt,
},
ctx: context.Background(),
}
c.initHooks(hooks{
dial: c.baseClient.dial,
process: c.baseClient.process,
})
c.connPool = newConnPool(opt, c.dialHook)
return c
}
func (c *SentinelClient) Context() context.Context {
return c.ctx
}
func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.ctx = ctx
return &clone
}
func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
err := c.processHook(ctx, cmd)
cmd.SetErr(err)
return err
}
func (c *SentinelClient) pubSub() *PubSub {
@ -335,8 +343,8 @@ func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *
return cmd
}
func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
func (c *SentinelClient) Sentinels(ctx context.Context, name string) *MapStringStringSliceCmd {
cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "sentinels", name)
_ = c.Process(ctx, cmd)
return cmd
}
@ -351,7 +359,7 @@ func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
// Reset resets all the masters with a matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
// (including a failover in progress), and removes every slave and sentinel
// (including a failover in progress), and removes every replica and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
@ -368,8 +376,8 @@ func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
}
// Master shows the state and info of the specified master.
func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
func (c *SentinelClient) Master(ctx context.Context, name string) *MapStringStringCmd {
cmd := NewMapStringStringCmd(ctx, "sentinel", "master", name)
_ = c.Process(ctx, cmd)
return cmd
}
@ -381,9 +389,9 @@ func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
return cmd
}
// Slaves shows a list of slaves for the specified master and their state.
func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
// Replicas shows a list of replicas for the specified master and their state.
func (c *SentinelClient) Replicas(ctx context.Context, name string) *MapStringStringSliceCmd {
cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "replicas", name)
_ = c.Process(ctx, cmd)
return cmd
}
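Because the command now returns a MapStringStringSliceCmd, callers get []map[string]string instead of untyped slices. A sketch, assuming ctx (sentinel address and master name illustrative):

sc := redis.NewSentinelClient(&redis.Options{Addr: "127.0.0.1:26379"})
replicas, err := sc.Replicas(ctx, "mymaster").Result() // []map[string]string
if err == nil {
	for _, r := range replicas {
		fmt.Println(r["ip"], r["port"], r["flags"])
	}
}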
@ -460,18 +468,18 @@ func (c *sentinelFailover) closeSentinel() error {
return firstErr
}
func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
func (c *sentinelFailover) RandomReplicaAddr(ctx context.Context) (string, error) {
if c.opt == nil {
return "", errors.New("opt is nil")
}
addresses, err := c.slaveAddrs(ctx, false)
addresses, err := c.replicaAddrs(ctx, false)
if err != nil {
return "", err
}
if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
addresses, err = c.slaveAddrs(ctx, true)
if len(addresses) == 0 && c.opt.UseDisconnectedReplicas {
addresses, err = c.replicaAddrs(ctx, true)
if err != nil {
return "", err
}
@ -489,8 +497,15 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
c.mu.RUnlock()
if sentinel != nil {
addr := c.getMasterAddr(ctx, sentinel)
if addr != "" {
addr, err := c.getMasterAddr(ctx, sentinel)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return "", err
}
// Continue on other errors
internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
c.opt.MasterName, err)
} else {
return addr, nil
}
}
@ -499,11 +514,18 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
defer c.mu.Unlock()
if c.sentinel != nil {
addr := c.getMasterAddr(ctx, c.sentinel)
if addr != "" {
addr, err := c.getMasterAddr(ctx, c.sentinel)
if err != nil {
_ = c.closeSentinel()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return "", err
}
// Continue on other errors
internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
c.opt.MasterName, err)
} else {
return addr, nil
}
_ = c.closeSentinel()
}
for i, sentinelAddr := range c.sentinelAddrs {
@ -511,9 +533,12 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
if err != nil {
_ = sentinel.Close()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return "", err
}
internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
c.opt.MasterName, err)
_ = sentinel.Close()
continue
}
@ -528,14 +553,21 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
c.mu.RLock()
sentinel := c.sentinel
c.mu.RUnlock()
if sentinel != nil {
addrs := c.getSlaveAddrs(ctx, sentinel)
if len(addrs) > 0 {
addrs, err := c.getReplicaAddrs(ctx, sentinel)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}
// Continue on other errors
internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
c.opt.MasterName, err)
} else if len(addrs) > 0 {
return addrs, nil
}
}
@ -544,11 +576,21 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
defer c.mu.Unlock()
if c.sentinel != nil {
addrs := c.getSlaveAddrs(ctx, c.sentinel)
if len(addrs) > 0 {
addrs, err := c.getReplicaAddrs(ctx, c.sentinel)
if err != nil {
_ = c.closeSentinel()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}
// Continue on other errors
internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
c.opt.MasterName, err)
} else if len(addrs) > 0 {
return addrs, nil
} else {
// No error and no replicas.
_ = c.closeSentinel()
}
_ = c.closeSentinel()
}
var sentinelReachable bool
@ -556,15 +598,18 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
for i, sentinelAddr := range c.sentinelAddrs {
sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
if err != nil {
internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
c.opt.MasterName, err)
_ = sentinel.Close()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}
internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s",
c.opt.MasterName, err)
continue
}
sentinelReachable = true
addrs := parseSlaveAddrs(slaves, useDisconnected)
addrs := parseReplicaAddrs(replicas, useDisconnected)
if len(addrs) == 0 {
continue
}
@ -581,60 +626,42 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) (string, error) {
addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
if err != nil {
internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
c.opt.MasterName, err)
return ""
return "", err
}
return net.JoinHostPort(addr[0], addr[1])
return net.JoinHostPort(addr[0], addr[1]), nil
}
func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
func (c *sentinelFailover) getReplicaAddrs(ctx context.Context, sentinel *SentinelClient) ([]string, error) {
addrs, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
if err != nil {
internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
c.opt.MasterName, err)
return []string{}
return nil, err
}
return parseSlaveAddrs(addrs, false)
return parseReplicaAddrs(addrs, false), nil
}
func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []string {
nodes := make([]string, 0, len(addrs))
for _, node := range addrs {
ip := ""
port := ""
flags := []string{}
lastkey := ""
isDown := false
for _, key := range node.([]interface{}) {
switch lastkey {
case "ip":
ip = key.(string)
case "port":
port = key.(string)
case "flags":
flags = strings.Split(key.(string), ",")
}
lastkey = key.(string)
}
for _, flag := range flags {
switch flag {
case "s_down", "o_down":
isDown = true
case "disconnected":
if !keepDisconnected {
if flags, ok := node["flags"]; ok {
for _, flag := range strings.Split(flags, ",") {
switch flag {
case "s_down", "o_down":
isDown = true
case "disconnected":
if !keepDisconnected {
isDown = true
}
}
}
}
if !isDown {
nodes = append(nodes, net.JoinHostPort(ip, port))
if !isDown && node["ip"] != "" && node["port"] != "" {
nodes = append(nodes, net.JoinHostPort(node["ip"], node["port"]))
}
}
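The v9 map form makes the parsing much simpler than v8's positional []interface{} walk. For example (values illustrative):

//   {"ip": "10.0.0.2", "port": "6379", "flags": "slave"}        → "10.0.0.2:6379"
//   {"ip": "10.0.0.3", "port": "6379", "flags": "slave,s_down"} → skipped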
@ -672,7 +699,7 @@ func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelCl
c.sentinel = sentinel
c.discoverSentinels(ctx)
c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+replica-reconf-done")
go c.listen(c.pubsub)
}
@ -683,16 +710,13 @@ func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
return
}
for _, sentinel := range sentinels {
vals := sentinel.([]interface{})
var ip, port string
for i := 0; i < len(vals); i += 2 {
key := vals[i].(string)
switch key {
case "ip":
ip = vals[i+1].(string)
case "port":
port = vals[i+1].(string)
}
ip, ok := sentinel["ip"]
if !ok {
continue
}
port, ok := sentinel["port"]
if !ok {
continue
}
if ip != "" && port != "" {
sentinelAddr := net.JoinHostPort(ip, port)
@ -742,7 +766,7 @@ func contains(slice []string, str string) bool {
//------------------------------------------------------------------------------
// NewFailoverClusterClient returns a client that supports routing read-only commands
// to a slave node.
// to a replica node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
copy(sentinelAddrs, failoverOpt.SentinelAddrs)
@ -763,14 +787,14 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
Addr: masterAddr,
}}
slaveAddrs, err := failover.slaveAddrs(ctx, false)
replicaAddrs, err := failover.replicaAddrs(ctx, false)
if err != nil {
return nil, err
}
for _, slaveAddr := range slaveAddrs {
for _, replicaAddr := range replicaAddrs {
nodes = append(nodes, ClusterNode{
Addr: slaveAddr,
Addr: replicaAddr,
})
}

View File

@ -3,8 +3,8 @@ package redis
import (
"context"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/redis/go-redis/v9/internal/pool"
"github.com/redis/go-redis/v9/internal/proto"
)
// TxFailedErr is returned when a Redis transaction fails.
@ -19,18 +19,16 @@ type Tx struct {
baseClient
cmdable
statefulCmdable
hooks
ctx context.Context
hooksMixin
}
func (c *Client) newTx(ctx context.Context) *Tx {
func (c *Client) newTx() *Tx {
tx := Tx{
baseClient: baseClient{
opt: c.opt,
connPool: pool.NewStickyConnPool(c.connPool),
},
hooks: c.hooks.clone(),
ctx: ctx,
hooksMixin: c.hooksMixin.clone(),
}
tx.init()
return &tx
@ -39,25 +37,19 @@ func (c *Client) newTx(ctx context.Context) *Tx {
func (c *Tx) init() {
c.cmdable = c.Process
c.statefulCmdable = c.Process
}
func (c *Tx) Context() context.Context {
return c.ctx
}
func (c *Tx) WithContext(ctx context.Context) *Tx {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.init()
clone.hooks.lock()
clone.ctx = ctx
return &clone
c.initHooks(hooks{
dial: c.baseClient.dial,
process: c.baseClient.process,
pipeline: c.baseClient.processPipeline,
txPipeline: c.baseClient.processTxPipeline,
})
}
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
err := c.processHook(ctx, cmd)
cmd.SetErr(err)
return err
}
// Watch prepares a transaction and marks the keys to be watched
@ -65,7 +57,7 @@ func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
tx := c.newTx(ctx)
tx := c.newTx()
defer tx.Close(ctx)
if len(keys) > 0 {
if err := tx.Watch(ctx, keys...).Err(); err != nil {
@ -109,9 +101,8 @@ func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
return c.processPipelineHook(ctx, cmds)
},
}
pipe.init()
@ -139,11 +130,22 @@ func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
func (c *Tx) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
cmds = wrapMultiExec(ctx, cmds)
return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}
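For context, a typical optimistic-locking round trip through Watch and TxPipelined, given a *redis.Client rdb and a ctx (key name illustrative):

err := rdb.Watch(ctx, func(tx *redis.Tx) error {
	n, err := tx.Get(ctx, "counter").Int()
	if err != nil && err != redis.Nil {
		return err
	}
	// Queued commands are bracketed by MULTI/EXEC via wrapMultiExec (below).
	_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Set(ctx, "counter", n+1, 0)
		return nil
	})
	return err
}, "counter")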
func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
if len(cmds) == 0 {
panic("not reached")
}
cmdsCopy := make([]Cmder, len(cmds)+2)
cmdsCopy[0] = NewStatusCmd(ctx, "multi")
copy(cmdsCopy[1:], cmds)
cmdsCopy[len(cmdsCopy)-1] = NewSliceCmd(ctx, "exec")
return cmdsCopy
}
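The effect on the wire, given two queued commands:

// cmds = [SET k v, INCR n] becomes:
//   MULTI
//   SET k v
//   INCR n
//   EXEC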

View File

@ -14,6 +14,9 @@ type UniversalOptions struct {
// of cluster/sentinel nodes.
Addrs []string
// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName string
// Database to be selected after connecting to the server.
// Only single-node and failover clients.
DB int
@ -23,6 +26,7 @@ type UniversalOptions struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
Protocol int
Username string
Password string
SentinelUsername string
@ -32,19 +36,20 @@ type UniversalOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
PoolSize int
PoolTimeout time.Duration
MinIdleConns int
MaxIdleConns int
ConnMaxIdleTime time.Duration
ConnMaxLifetime time.Duration
TLSConfig *tls.Config
@ -68,10 +73,12 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
}
return &ClusterOptions{
Addrs: o.Addrs,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
Addrs: o.Addrs,
ClientName: o.ClientName,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
@ -84,16 +91,19 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
ContextTimeoutEnabled: o.ContextTimeoutEnabled,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
PoolTimeout: o.PoolTimeout,
MinIdleConns: o.MinIdleConns,
MaxIdleConns: o.MaxIdleConns,
ConnMaxIdleTime: o.ConnMaxIdleTime,
ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
}
@ -108,11 +118,13 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
return &FailoverOptions{
SentinelAddrs: o.Addrs,
MasterName: o.MasterName,
ClientName: o.ClientName,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
DB: o.DB,
Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
SentinelUsername: o.SentinelUsername,
@ -122,17 +134,18 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
ContextTimeoutEnabled: o.ContextTimeoutEnabled,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
PoolTimeout: o.PoolTimeout,
MinIdleConns: o.MinIdleConns,
MaxIdleConns: o.MaxIdleConns,
ConnMaxIdleTime: o.ConnMaxIdleTime,
ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
}
@ -146,11 +159,13 @@ func (o *UniversalOptions) Simple() *Options {
}
return &Options{
Addr: addr,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
Addr: addr,
ClientName: o.ClientName,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
DB: o.DB,
Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
@ -158,17 +173,18 @@ func (o *UniversalOptions) Simple() *Options {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
ContextTimeoutEnabled: o.ContextTimeoutEnabled,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
PoolTimeout: o.PoolTimeout,
MinIdleConns: o.MinIdleConns,
MaxIdleConns: o.MaxIdleConns,
ConnMaxIdleTime: o.ConnMaxIdleTime,
ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
}
@ -182,13 +198,13 @@ func (o *UniversalOptions) Simple() *Options {
// clients in different environments.
type UniversalClient interface {
Cmdable
Context() context.Context
AddHook(Hook)
Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
Do(ctx context.Context, args ...interface{}) *Cmd
Process(ctx context.Context, cmd Cmder) error
Subscribe(ctx context.Context, channels ...string) *PubSub
PSubscribe(ctx context.Context, channels ...string) *PubSub
SSubscribe(ctx context.Context, channels ...string) *PubSub
Close() error
PoolStats() *PoolStats
}
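NewUniversalClient picks the concrete client from these options — roughly: MasterName set → failover client, several Addrs → cluster client, otherwise a single-node client. A sketch (values illustrative):

rdb := redis.NewUniversalClient(&redis.UniversalOptions{
	Addrs:      []string{"127.0.0.1:6379"},
	ClientName: "example-app", // runs CLIENT SETNAME on each connection
})
defer rdb.Close()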

View File

@ -2,5 +2,5 @@ package redis
// Version is the current release version.
func Version() string {
return "8.11.5"
return "9.1.0"
}

22
vendor/modules.txt vendored
View File

@ -130,6 +130,8 @@ github.com/eggsampler/acme/v3
# github.com/felixge/httpsnoop v1.0.3
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.4.9
## explicit; go 1.13
# github.com/go-logr/logr v1.2.4
## explicit; go 1.16
github.com/go-logr/logr
@ -144,16 +146,6 @@ github.com/go-playground/locales/currency
# github.com/go-playground/universal-translator v0.18.1
## explicit; go 1.18
github.com/go-playground/universal-translator
# github.com/go-redis/redis/v8 v8.11.5
## explicit; go 1.17
github.com/go-redis/redis/v8
github.com/go-redis/redis/v8/internal
github.com/go-redis/redis/v8/internal/hashtag
github.com/go-redis/redis/v8/internal/hscan
github.com/go-redis/redis/v8/internal/pool
github.com/go-redis/redis/v8/internal/proto
github.com/go-redis/redis/v8/internal/rand
github.com/go-redis/redis/v8/internal/util
# github.com/go-sql-driver/mysql v1.5.0
## explicit; go 1.10
github.com/go-sql-driver/mysql
@ -241,6 +233,16 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/redis/go-redis/v9 v9.1.0
## explicit; go 1.18
github.com/redis/go-redis/v9
github.com/redis/go-redis/v9/internal
github.com/redis/go-redis/v9/internal/hashtag
github.com/redis/go-redis/v9/internal/hscan
github.com/redis/go-redis/v9/internal/pool
github.com/redis/go-redis/v9/internal/proto
github.com/redis/go-redis/v9/internal/rand
github.com/redis/go-redis/v9/internal/util
# github.com/rogpeppe/go-internal v1.9.0
## explicit; go 1.17
# github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399