Add rocsp-tool to manually store OCSP responses in Redis (#5758)
This is a sort of proof of concept of the Redis interaction, which will evolve into a tool for inspection and manual repair of missing entries, if we find ourselves needing to do that. The important bits here are rocsp/rocsp.go and cmd/rocsp-tool/main.go. Also, the newly-vendored Redis client.
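The tool reads a JSON config (see test/config/rocsp-tool.json below) and takes DER-encoded OCSP response files as positional arguments; each is verified against a configured issuer, written to Redis, then read back as a sanity check. A hypothetical invocation (the response filename is invented for illustration):

    rocsp-tool -config test/config/rocsp-tool.json /tmp/ffaa13f9.response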
parent 9d07942c9d
commit 7fab32a000
cmd/boulder/main.go
@@ -29,6 +29,7 @@ import (
    _ "github.com/letsencrypt/boulder/cmd/ocsp-updater"
    _ "github.com/letsencrypt/boulder/cmd/orphan-finder"
    _ "github.com/letsencrypt/boulder/cmd/reversed-hostname-checker"
    _ "github.com/letsencrypt/boulder/cmd/rocsp-tool"

    "github.com/letsencrypt/boulder/cmd"
)
cmd/rocsp-tool/main.go (new file)
@@ -0,0 +1,192 @@
package notmain

import (
    "bytes"
    "context"
    "crypto/x509/pkix"
    "encoding/asn1"
    "flag"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "strings"
    "time"

    "github.com/jmhodges/clock"
    "github.com/letsencrypt/boulder/cmd"
    "github.com/letsencrypt/boulder/core"
    "github.com/letsencrypt/boulder/issuance"
    "github.com/letsencrypt/boulder/rocsp"
    rocsp_config "github.com/letsencrypt/boulder/rocsp/config"
    "github.com/letsencrypt/boulder/test/ocsp/helper"
    "golang.org/x/crypto/ocsp"
)

type config struct {
    ROCSPTool struct {
        Redis rocsp_config.RedisConfig
        // Issuers is a map from filenames to short issuer IDs.
        // Each filename must contain an issuer certificate. The short issuer
        // IDs are arbitrarily assigned and must be consistent across OCSP
        // components. For production we'll use the number part of the CN, i.e.
        // E1 -> 1, R3 -> 3, etc.
        Issuers map[string]int
    }
}

func init() {
    cmd.RegisterCommand("rocsp-tool", main)
}

func main() {
    if err := main2(); err != nil {
        log.Fatal(err)
    }
}

type ShortIDIssuer struct {
    *issuance.Certificate
    subject pkix.RDNSequence
    shortID byte
}

func loadIssuers(input map[string]int) ([]ShortIDIssuer, error) {
    var issuers []ShortIDIssuer
    for issuerFile, shortID := range input {
        if shortID > 255 || shortID < 0 {
            return nil, fmt.Errorf("invalid shortID %d (must be byte)", shortID)
        }
        cert, err := issuance.LoadCertificate(issuerFile)
        if err != nil {
            return nil, fmt.Errorf("reading issuer: %w", err)
        }
        var subject pkix.RDNSequence
        _, err = asn1.Unmarshal(cert.Certificate.RawSubject, &subject)
        if err != nil {
            return nil, fmt.Errorf("parsing issuer.RawSubject: %w", err)
        }
        var shortID byte = byte(shortID)
        for _, issuer := range issuers {
            if issuer.shortID == shortID {
                return nil, fmt.Errorf("duplicate shortID in config file: %d (for %q and %q)", shortID, issuer.subject, subject)
            }
            if !issuer.IsCA {
                return nil, fmt.Errorf("certificate for %q is not a CA certificate", subject)
            }
        }
        issuers = append(issuers, ShortIDIssuer{cert, subject, shortID})
    }
    return issuers, nil
}

func findIssuer(resp *ocsp.Response, issuers []ShortIDIssuer) (*ShortIDIssuer, error) {
    var responder pkix.RDNSequence
    _, err := asn1.Unmarshal(resp.RawResponderName, &responder)
    if err != nil {
        return nil, fmt.Errorf("parsing resp.RawResponderName: %w", err)
    }
    var responders strings.Builder
    for _, issuer := range issuers {
        fmt.Fprintf(&responders, "%s\n", issuer.subject)
        if bytes.Equal(issuer.RawSubject, resp.RawResponderName) {
            return &issuer, nil
        }
    }
    return nil, fmt.Errorf("no issuer found matching OCSP response for %s. Available issuers:\n%s\n", responder, responders.String())
}

func main2() error {
    configFile := flag.String("config", "", "File path to the configuration file for this service")
    flag.Parse()
    if *configFile == "" {
        flag.Usage()
        os.Exit(1)
    }

    var c config
    err := cmd.ReadConfigFile(*configFile, &c)
    if err != nil {
        return fmt.Errorf("reading JSON config file: %w", err)
    }

    issuers, err := loadIssuers(c.ROCSPTool.Issuers)
    if err != nil {
        return fmt.Errorf("loading issuers: %w", err)
    }
    if len(issuers) == 0 {
        return fmt.Errorf("'issuers' section of config JSON is required.")
    }
    clk := cmd.Clock()
    client, err := rocsp_config.MakeClient(&c.ROCSPTool.Redis, clk)
    if err != nil {
        return fmt.Errorf("making client: %w", err)
    }

    for _, respFile := range flag.Args() {
        err := storeResponse(respFile, issuers, client, clk)
        if err != nil {
            return err
        }
    }
    return nil
}

func storeResponse(respFile string, issuers []ShortIDIssuer, client *rocsp.WritingClient, clk clock.Clock) error {
    ctx := context.Background()
    respBytes, err := ioutil.ReadFile(respFile)
    if err != nil {
        return fmt.Errorf("reading response file %q: %w", respFile, err)
    }
    resp, err := ocsp.ParseResponse(respBytes, nil)
    if err != nil {
        return fmt.Errorf("parsing response: %w", err)
    }
    issuer, err := findIssuer(resp, issuers)
    if err != nil {
        return fmt.Errorf("finding issuer for response: %w", err)
    }

    // Re-parse the response, this time verifying with the appropriate issuer
    resp, err = ocsp.ParseResponse(respBytes, issuer.Certificate.Certificate)
    if err != nil {
        return fmt.Errorf("parsing response: %w", err)
    }

    serial := core.SerialToString(resp.SerialNumber)

    if resp.NextUpdate.Before(clk.Now()) {
        return fmt.Errorf("response for %s expired %s ago", serial,
            clk.Now().Sub(resp.NextUpdate))
    }

    // Note: Here we set the TTL to slightly more than the lifetime of the
    // OCSP response. In ocsp-updater we'll want to set it to the lifetime
    // of the certificate, so that the metadata field doesn't fall out of
    // storage even if we are down for days. However, in this tool we don't
    // have the full certificate, so this will do.
    ttl := resp.NextUpdate.Sub(clk.Now()) + time.Hour

    log.Printf("storing response for %s, generated %s, ttl %g hours",
        serial,
        resp.ThisUpdate,
        ttl.Hours(),
    )

    err = client.StoreResponse(ctx, respBytes, issuer.shortID, ttl)
    if err != nil {
        return fmt.Errorf("storing response: %w", err)
    }

    retrievedResponse, err := client.GetResponse(ctx, serial)
    if err != nil {
        return fmt.Errorf("getting response: %w", err)
    }

    parsedRetrievedResponse, err := ocsp.ParseResponse(retrievedResponse, issuer.Certificate.Certificate)
    if err != nil {
        return fmt.Errorf("parsing retrieved response: %w", err)
    }
    log.Printf("retrieved %s", helper.PrettyResponse(parsedRetrievedResponse))
    return nil
}
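To make the Issuers mapping above concrete, a minimal sketch of the kind of value loadIssuers expects (file paths are invented; the short IDs follow the "number part of the CN" convention described in the comment):

    // Hypothetical mapping from issuer certificate files to short issuer IDs.
    var issuers = map[string]int{
        "/etc/issuers/e1.pem": 1, // CN=E1
        "/etc/issuers/r3.pem": 3, // CN=R3
    }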
docker-compose.yml
@@ -16,6 +16,8 @@ services:
        ipv4_address: 10.77.77.77
      rednet:
        ipv4_address: 10.88.88.88
      redisnet:
        ipv4_address: 10.33.33.33
    # Use sd-test-srv as a backup to Docker's embedded DNS server
    # (https://docs.docker.com/config/containers/container-networking/#dns-services).
    # If there's a name Docker's DNS server doesn't know about, it will
@@ -33,6 +35,8 @@ services:
      - 8055:8055 # dns-test-srv updates
    depends_on:
      - bmysql
      - bredis
      - bredis_clusterer
    entrypoint: test/entrypoint.sh
    working_dir: &boulder_working_dir /go/src/github.com/letsencrypt/boulder

@@ -64,6 +68,20 @@ services:
    networks:
      redisnet:

  bredis_clusterer:
    image: redis:latest
    volumes:
      - ./test/:/test/:cached
      - ./cluster/:/cluster/:cached
    command: /test/wait-for-it.sh 10.33.33.2 4218 /test/redis-create.sh
    depends_on:
      - bredis
    networks:
      redisnet:
        ipv4_address: 10.33.33.10
        aliases:
          - boulder-redis-clusterer

  netaccess:
    image: *boulder_image
    environment:
@@ -76,8 +94,6 @@ services:
      - .:/go/src/github.com/letsencrypt/boulder
    working_dir: *boulder_working_dir
    entrypoint: test/entrypoint-netaccess.sh
    depends_on:
      - bmysql

    networks:
      bluenet:
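With the added depends_on entries, bringing up the main container should also start the Redis node and the one-shot cluster creator, e.g. (the service name is assumed from the hunk context):

    docker-compose up boulder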
go.mod
@@ -6,6 +6,8 @@ require (
    github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af
    github.com/eggsampler/acme/v3 v3.0.0
    github.com/go-gorp/gorp/v3 v3.0.2
    github.com/go-redis/redis v6.15.9+incompatible // indirect
    github.com/go-redis/redis/v8 v8.11.4
    github.com/go-sql-driver/mysql v1.5.0
    github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
go.sum
@@ -21,6 +21,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
@@ -37,6 +39,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/eggsampler/acme/v3 v3.0.0 h1:Fl1fWD94NcdC7Ensb6Ed/CJZ6S24PpekLo/jZB6Ltg8=
github.com/eggsampler/acme/v3 v3.0.0/go.mod h1:gw64Ckma6iKulWks9BtE/g/9z/Vdz9D1lM7x7M1X1Ag=
@@ -62,6 +66,7 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
@@ -75,11 +80,16 @@ github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvSc
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw=
github.com/gobuffalo/envy v1.8.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
github.com/gobuffalo/envy v1.9.0/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
@@ -122,8 +132,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -134,8 +145,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -210,7 +221,6 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v4 v4.3.0/go.mod h1:PvmtTvhVqKDzDQy4d3bWzPjZLzom4iQbAZy2sgZ/qI8=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
@@ -222,7 +232,6 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -242,7 +251,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -261,22 +269,24 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg=
github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -334,7 +344,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
@@ -358,8 +367,8 @@ github.com/weppos/publicsuffix-go v0.15.1-0.20211029155132-7594db4f858a h1:9vq9x
github.com/weppos/publicsuffix-go v0.15.1-0.20211029155132-7594db4f858a/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
@@ -372,9 +381,7 @@ go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9G
go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE=
go.opentelemetry.io/otel v0.19.0 h1:Lenfy7QHRXPZVsw/12CWpxX6d/JkrX8wrx2vO8G80Ng=
go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg=
go.opentelemetry.io/otel/metric v0.19.0 h1:dtZ1Ju44gkJkYvo+3qGqVXmf88tc+a42edOywypengg=
go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc=
go.opentelemetry.io/otel/oteltest v0.19.0 h1:YVfA0ByROYqTwOxqHVZYZExzEpfZor+MU1rU+ip2v9Q=
go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA=
go.opentelemetry.io/otel/trace v0.19.0 h1:1ucYlenXIDA1OlHVLDZKX0ObXV5RLaq06DtUKz5e5zc=
go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg=
@@ -403,6 +410,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -422,10 +430,12 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb h1:pirldcYWx7rx7kE5r+9WsOXPXK0+WH5+uZ7uPmJ44uM=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -433,8 +443,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -448,8 +458,11 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -457,9 +470,9 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -485,12 +498,13 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
@@ -514,9 +528,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
@@ -525,7 +539,6 @@ gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -545,7 +558,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rocsp/config/rocsp_config.go (new file)
@@ -0,0 +1,46 @@
package rocsp_config

import (
    "fmt"

    "github.com/go-redis/redis/v8"
    "github.com/jmhodges/clock"
    "github.com/letsencrypt/boulder/cmd"
    "github.com/letsencrypt/boulder/rocsp"
)

// RedisConfig contains the configuration needed to act as a Redis client.
type RedisConfig struct {
    // PasswordFile is a file containing the password for the Redis user.
    cmd.PasswordConfig
    // TLS contains the configuration to speak TLS with Redis.
    TLS cmd.TLSConfig
    // Username is a Redis username.
    Username string
    // Addrs is a list of IP address:port pairs.
    Addrs []string
    // Timeout is a per-request timeout applied to all Redis requests.
    Timeout cmd.ConfigDuration
}

func MakeClient(c *RedisConfig, clk clock.Clock) (*rocsp.WritingClient, error) {
    password, err := c.PasswordConfig.Pass()
    if err != nil {
        return nil, fmt.Errorf("loading password: %w", err)
    }

    tlsConfig, err := c.TLS.Load()
    if err != nil {
        return nil, fmt.Errorf("loading TLS config: %w", err)
    }

    timeout := c.Timeout.Duration

    rdb := redis.NewClusterClient(&redis.ClusterOptions{
        Addrs:     c.Addrs,
        Username:  c.Username,
        Password:  password,
        TLSConfig: tlsConfig,
    })
    return rocsp.NewWritingClient(rdb, timeout, clk), nil
}
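For illustration, a minimal sketch of building a client from this config and storing a response (not part of the commit; the PasswordFile and TLS field names are inferred from the test config below, and the response file path is hypothetical):

    package main

    import (
        "context"
        "io/ioutil"
        "log"
        "time"

        "github.com/jmhodges/clock"
        "github.com/letsencrypt/boulder/cmd"
        rocsp_config "github.com/letsencrypt/boulder/rocsp/config"
    )

    func main() {
        ca := "test/redis-tls/minica.pem"
        cert := "test/redis-tls/boulder/cert.pem"
        key := "test/redis-tls/boulder/key.pem"
        conf := &rocsp_config.RedisConfig{
            PasswordConfig: cmd.PasswordConfig{PasswordFile: "test/secrets/rocsp_tool_password"},
            TLS:            cmd.TLSConfig{CACertFile: &ca, CertFile: &cert, KeyFile: &key},
            Username:       "ocsp-updater",
            Addrs:          []string{"10.33.33.7:4218"},
            Timeout:        cmd.ConfigDuration{Duration: 5 * time.Second},
        }
        client, err := rocsp_config.MakeClient(conf, clock.New())
        if err != nil {
            log.Fatal(err)
        }
        respBytes, err := ioutil.ReadFile("some.response") // hypothetical DER-encoded OCSP response
        if err != nil {
            log.Fatal(err)
        }
        // shortIssuerID 1 is arbitrary here; it must match the issuer mapping
        // used by the other OCSP components.
        err = client.StoreResponse(context.Background(), respBytes, 1, 24*time.Hour)
        if err != nil {
            log.Fatal(err)
        }
    }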
rocsp/rocsp.go (new file)
@@ -0,0 +1,175 @@
package rocsp

import (
    "context"
    "encoding/binary"
    "fmt"
    "time"

    "github.com/go-redis/redis/v8"
    "github.com/jmhodges/clock"
    "github.com/letsencrypt/boulder/core"
    "golang.org/x/crypto/ocsp"
)

// Metadata represents information stored with the 'm' prefix in the Redis DB:
// information required to maintain or serve the response, but not the response
// itself.
type Metadata struct {
    ShortIssuerID byte
    // ThisUpdate contains the ThisUpdate time of the stored OCSP response.
    ThisUpdate time.Time
}

// String implements pretty-printing of Metadata.
func (m Metadata) String() string {
    return fmt.Sprintf("shortIssuerID: 0x%x, updated at: %s", m.ShortIssuerID, m.ThisUpdate)
}

// Marshal turns a Metadata into a slice of 9 bytes for writing into Redis.
// Storing these always as 9 bytes gives us some potential to change the
// storage format non-disruptively in the future, so long as we can distinguish
// on the length of the stored value.
func (m Metadata) Marshal() []byte {
    var output [9]byte
    output[0] = m.ShortIssuerID
    var epochSeconds uint64 = uint64(m.ThisUpdate.Unix())
    binary.LittleEndian.PutUint64(output[1:], epochSeconds)
    return output[:]
}

// UnmarshalMetadata takes data from Redis and turns it into a Metadata object.
func UnmarshalMetadata(input []byte) (Metadata, error) {
    if len(input) != 9 {
        return Metadata{}, fmt.Errorf("invalid metadata length %d", len(input))
    }
    var output Metadata
    output.ShortIssuerID = input[0]
    epochSeconds := binary.LittleEndian.Uint64(input[1:])
    output.ThisUpdate = time.Unix(int64(epochSeconds), 0).UTC()
    return output, nil
}

// MakeResponseKey generates a Redis key string under which a response with the
// given serial should be stored.
func MakeResponseKey(serial string) string {
    return fmt.Sprintf("r{%s}", serial)
}

// MakeMetadataKey generates a Redis key string under which metadata for the
// response with the given serial should be stored.
func MakeMetadataKey(serial string) string {
    return fmt.Sprintf("m{%s}", serial)
}

// Client represents a read-only Redis client.
type Client struct {
    rdb     *redis.ClusterClient
    timeout time.Duration
    clk     clock.Clock
}

// NewClient creates a Client.
func NewClient(rdb *redis.ClusterClient, timeout time.Duration, clk clock.Clock) *Client {
    return &Client{
        rdb:     rdb,
        timeout: timeout,
        clk:     clk,
    }
}

// WritingClient represents a Redis client that can both read and write.
type WritingClient struct {
    Client
}

// NewWritingClient creates a WritingClient.
func NewWritingClient(rdb *redis.ClusterClient, timeout time.Duration, clk clock.Clock) *WritingClient {
    return &WritingClient{
        Client{
            rdb:     rdb,
            timeout: timeout,
            clk:     clk,
        },
    }
}

// StoreResponse parses the given bytes as an OCSP response, and stores it into
// Redis, updating both the metadata and response keys. ShortIssuerID is an
// arbitrarily assigned byte that uniquely identifies each issuer. Must be the
// same across OCSP components. Returns an error if the OCSP response fails to
// parse.
func (c *WritingClient) StoreResponse(ctx context.Context, respBytes []byte, shortIssuerID byte, ttl time.Duration) error {
    ctx, cancel := context.WithTimeout(ctx, c.timeout)
    defer cancel()

    resp, err := ocsp.ParseResponse(respBytes, nil)
    if err != nil {
        return fmt.Errorf("parsing %d-byte response: %w", len(respBytes), err)
    }

    serial := core.SerialToString(resp.SerialNumber)

    responseKey := MakeResponseKey(serial)
    metadataKey := MakeMetadataKey(serial)

    metadataStruct := Metadata{
        ThisUpdate:    resp.ThisUpdate,
        ShortIssuerID: shortIssuerID,
    }
    metadataValue := metadataStruct.Marshal()

    err = c.rdb.Watch(ctx, func(tx *redis.Tx) error {
        err = tx.Set(ctx, responseKey, respBytes, ttl).Err()
        if err != nil {
            return fmt.Errorf("setting response: %w", err)
        }

        err = tx.Set(ctx, metadataKey, metadataValue, ttl).Err()
        if err != nil {
            return fmt.Errorf("setting metadata: %w", err)
        }

        return nil
    }, metadataKey, responseKey)
    if err != nil {
        return fmt.Errorf("transaction failed: %w", err)
    }

    return nil
}

// GetResponse fetches a response for the given serial number.
// It does not parse the response, and does not check the metadata field.
func (c *Client) GetResponse(ctx context.Context, serial string) ([]byte, error) {
    ctx, cancel := context.WithTimeout(ctx, c.timeout)
    defer cancel()

    responseKey := MakeResponseKey(serial)

    val, err := c.rdb.Get(ctx, responseKey).Result()
    if err != nil {
        return nil, fmt.Errorf("getting response: %w", err)
    }

    return []byte(val), nil
}

// GetMetadata fetches the metadata for the given serial number.
func (c *Client) GetMetadata(ctx context.Context, serial string) (*Metadata, error) {
    ctx, cancel := context.WithTimeout(ctx, c.timeout)
    defer cancel()

    metadataKey := MakeMetadataKey(serial)

    val, err := c.rdb.Get(ctx, metadataKey).Result()
    if err != nil {
        return nil, fmt.Errorf("getting metadata: %w", err)
    }
    metadata, err := UnmarshalMetadata([]byte(val))
    if err != nil {
        return nil, fmt.Errorf("unmarshaling metadata: %w", err)
    }
    return &metadata, nil
}
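As a sketch of the 9-byte metadata layout above (one byte of short issuer ID, then eight bytes of little-endian Unix seconds), a round-trip looks like this; the serial number is hypothetical:

    package main

    import (
        "fmt"
        "time"

        "github.com/letsencrypt/boulder/rocsp"
    )

    func main() {
        m := rocsp.Metadata{ShortIssuerID: 3, ThisUpdate: time.Unix(1635192000, 0).UTC()}
        b := m.Marshal() // 9 bytes: [3, <8 bytes of little-endian epoch seconds>]
        fmt.Printf("%d bytes: %x\n", len(b), b)

        back, err := rocsp.UnmarshalMetadata(b)
        if err != nil {
            panic(err)
        }
        fmt.Println(back) // shortIssuerID: 0x3, updated at: 2021-10-25 20:00:00 +0000 UTC

        // The corresponding Redis keys for a (hypothetical) serial:
        serial := "ffaa13f9c34be80b8e2532b83afe063b59a6"
        fmt.Println(rocsp.MakeResponseKey(serial)) // r{ffaa13f9c34be80b8e2532b83afe063b59a6}
        fmt.Println(rocsp.MakeMetadataKey(serial)) // m{ffaa13f9c34be80b8e2532b83afe063b59a6}
    }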
rocsp/rocsp_test.go (new file)
@@ -0,0 +1,75 @@
package rocsp

import (
    "bytes"
    "context"
    "io/ioutil"
    "testing"
    "time"

    "github.com/go-redis/redis/v8"
    "github.com/jmhodges/clock"
    "github.com/letsencrypt/boulder/cmd"
)

func makeClient() (*WritingClient, clock.Clock) {
    CACertFile := "../test/redis-tls/minica.pem"
    CertFile := "../test/redis-tls/boulder/cert.pem"
    KeyFile := "../test/redis-tls/boulder/key.pem"
    tlsConfig := cmd.TLSConfig{
        CACertFile: &CACertFile,
        CertFile:   &CertFile,
        KeyFile:    &KeyFile,
    }
    tlsConfig2, err := tlsConfig.Load()
    if err != nil {
        panic(err)
    }

    rdb := redis.NewClusterClient(&redis.ClusterOptions{
        Addrs:     []string{"10.33.33.2:4218"},
        Username:  "unittest-rw",
        Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
        TLSConfig: tlsConfig2,
    })
    clk := clock.NewFake()
    return NewWritingClient(rdb, 5*time.Second, clk), clk
}

func TestSetAndGet(t *testing.T) {
    client, _ := makeClient()

    response, err := ioutil.ReadFile("testdata/ocsp.response")
    if err != nil {
        t.Fatal(err)
    }
    var shortIssuerID byte = 99
    err = client.StoreResponse(context.Background(), response, byte(shortIssuerID), time.Hour)
    if err != nil {
        t.Fatalf("storing response: %s", err)
    }

    serial := "ffaa13f9c34be80b8e2532b83afe063b59a6"
    resp2, err := client.GetResponse(context.Background(), serial)
    if err != nil {
        t.Fatalf("getting response: %s", err)
    }
    if !bytes.Equal(resp2, response) {
        t.Errorf("response written and response retrieved were not equal")
    }

    metadata, err := client.GetMetadata(context.Background(), serial)
    if err != nil {
        t.Fatalf("getting metadata: %s", err)
    }
    if metadata.ShortIssuerID != shortIssuerID {
        t.Errorf("expected shortIssuerID %d, got %d", shortIssuerID, metadata.ShortIssuerID)
    }
    expectedTime, err := time.Parse(time.RFC3339, "2021-10-25T20:00:00Z")
    if err != nil {
        t.Fatalf("failed to parse time: %s", err)
    }
    if metadata.ThisUpdate != expectedTime {
        t.Errorf("expected ThisUpdate %q, got %q", expectedTime, metadata.ThisUpdate)
    }
}

rocsp/testdata/ocsp.response (binary file not shown)
test/config/rocsp-tool.json (new file)
@@ -0,0 +1,23 @@
{
  "rocspTool": {
    "redis": {
      "username": "ocsp-updater",
      "passwordFile": "test/secrets/rocsp_tool_password",
      "addrs": [
        "10.33.33.7:4218"
      ],
      "timeout": "5s",
      "tls": {
        "caCertFile": "test/redis-tls/minica.pem",
        "certFile": "test/redis-tls/boulder/cert.pem",
        "keyFile": "test/redis-tls/boulder/key.pem"
      }
    },
    "issuers": {
      ".hierarchy/intermediate-cert-ecdsa-a.pem": 1,
      ".hierarchy/intermediate-cert-ecdsa-b.pem": 2,
      ".hierarchy/intermediate-cert-rsa-a.pem": 3,
      ".hierarchy/intermediate-cert-rsa-b.pem": 4
    }
  }
}
test/entrypoint-netaccess.sh
@@ -10,4 +10,4 @@ if [[ "$@" = "" ]]; then
  echo "Not needed as part of 'docker-compose up'. Exiting normally."
  exit 0
fi
$(dirname "${BASH_SOURCE[0]}")/entrypoint.sh "$@"
"$@"
test/entrypoint.sh
@@ -30,8 +30,9 @@ wait_tcp_port() {
  exec 6>&-
  echo "Connected to $host:$port"
}
# make sure we can reach the mysqldb
# make sure we can reach the mysqldb and Redis cluster is done being created.
wait_tcp_port boulder-mysql 3306
wait_tcp_port 10.33.33.10 4218

# create the database
MYSQL_CONTAINER=1 $DIR/create_db.sh
test/ocsp/helper/helper.go
@@ -3,6 +3,7 @@ package helper
import (
    "bytes"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/asn1"
    "encoding/base64"
    "encoding/pem"
@@ -358,23 +359,33 @@ func PrettyResponse(resp *ocsp.Response) string {

    pr("\n")
    pr("Response:\n")
    pr("  CertStatus %d\n", resp.Status)
    pr("  SerialNumber %036x\n", resp.SerialNumber)
    pr("  CertStatus %d\n", resp.Status)
    pr("  RevocationReason %d\n", resp.RevocationReason)
    pr("  RevokedAt %s\n", resp.RevokedAt)
    pr("  ProducedAt %s\n", resp.ProducedAt)
    pr("  ThisUpdate %s\n", resp.ThisUpdate)
    pr("  NextUpdate %s\n", resp.NextUpdate)
    pr("  RevokedAt %s\n", resp.RevokedAt)
    pr("  RevocationReason %d\n", resp.RevocationReason)
    pr("  SignatureAlgorithm %s\n", resp.SignatureAlgorithm)
    pr("  Extensions %#v\n", resp.Extensions)
    if resp.Certificate == nil {
        pr("  Certificate: nil\n")
    } else {
    pr("  IssuerHash %s\n", resp.IssuerHash)
    if resp.Extensions != nil {
        pr("  Extensions %#v\n", resp.Extensions)
    }
    if resp.Certificate != nil {
        pr("  Certificate:\n")
        pr("    Subject: %s\n", resp.Certificate.Subject)
        pr("    Issuer: %s\n", resp.Certificate.Issuer)
        pr("    NotBefore: %s\n", resp.Certificate.NotBefore)
        pr("    NotAfter: %s\n", resp.Certificate.NotAfter)
    }

    var responder pkix.RDNSequence
    _, err := asn1.Unmarshal(resp.RawResponderName, &responder)
    if err != nil {
        pr("  Responder: error (%s)\n", err)
    } else {
        pr("  Responder: %s\n", responder)
    }

    return builder.String()
}
test/redis-create.sh
@@ -1,14 +1,30 @@
#!/bin/bash

redis-cli \
    --cluster-yes \
    --cluster create \
    10.33.33.2:4218 10.33.33.3:4218 10.33.33.4:4218 \
    10.33.33.5:4218 10.33.33.6:4218 10.33.33.7:4218 \
    --cluster-replicas 1 \
    --tls \
    --cert /test/redis-tls/redis/cert.pem \
    --key /test/redis-tls/redis/key.pem \
    --cacert /test/redis-tls/minica.pem \
    --user replication-user \
    --pass 435e9c4225f08813ef3af7c725f0d30d263b9cd3
set -feuo pipefail

ARGS="--tls \
    --cert /test/redis-tls/redis/cert.pem \
    --key /test/redis-tls/redis/key.pem \
    --cacert /test/redis-tls/minica.pem \
    --user replication-user \
    --pass 435e9c4225f08813ef3af7c725f0d30d263b9cd3"

if ! redis-cli \
    --cluster check \
    10.33.33.2:4218 \
    $ARGS ; then
    echo "Cluster needs creation!"
    redis-cli \
        --cluster-yes \
        --cluster create \
        10.33.33.2:4218 10.33.33.3:4218 10.33.33.4:4218 \
        10.33.33.5:4218 10.33.33.6:4218 10.33.33.7:4218 \
        --cluster-replicas 1 \
        $ARGS
fi

# Hack: run redis-server so we have something listening on a port.
# The Boulder container will wait for this port on this container to be
# available before starting up.
echo "Starting a server so everything knows we're done."
redis-server /test/redis.config
test/redis.config
@@ -24,6 +24,7 @@ user ocsp-updater on +@all ~* >e4e9ce7845cb6adbbc44fb1d9deb05e6b4dc1386
user ocsp-responder on +@all ~* >0e5a4c8b5faaf3194c8ad83c3dd9a0dd8a75982b
user boulder-ra on +@all ~* >b3b2fcbbf46fe39fd522c395a51f84d93a98ff2f
user replication-user on +@all ~* >435e9c4225f08813ef3af7c725f0d30d263b9cd3
user unittest-rw on +@all ~* >824968fa490f4ecec1e52d5e34916bdb60d45f8d
masteruser replication-user
masterauth 435e9c4225f08813ef3af7c725f0d30d263b9cd3
tls-cert-file /test/redis-tls/redis/cert.pem
test/secrets/rocsp_tool_password (new file)
@@ -0,0 +1 @@
e4e9ce7845cb6adbbc44fb1d9deb05e6b4dc1386
test/wait-for-it.sh (new file)
@@ -0,0 +1,28 @@
#!/bin/bash

set -e -u

wait_tcp_port() {
    local host="${1}" port="${2}"

    # see http://tldp.org/LDP/abs/html/devref1.html for description of this syntax.
    local max_tries="40"
    for n in `seq 1 "${max_tries}"` ; do
        if exec 6<>/dev/tcp/"${host}"/"${port}"; then
            break
        else
            echo "$(date) - still trying to connect to ${host}:${port}"
            sleep 1
        fi
        if [ "${n}" -eq "${max_tries}" ]; then
            echo "unable to connect"
            exit 1
        fi
    done
    exec 6>&-
    echo "Connected to ${host}:${port}"
}

wait_tcp_port "${1}" "${2}"
shift 2
exec "$@"
vendor/github.com/cespare/xxhash/v2/.travis.yml (deleted)
@@ -1,8 +0,0 @@
language: go
go:
  - "1.x"
  - master
env:
  - TAGS=""
  - TAGS="-tags purego"
script: go test $TAGS -v ./...
vendor/github.com/cespare/xxhash/v2/README.md
@@ -1,7 +1,7 @@
# xxhash

[](https://godoc.org/github.com/cespare/xxhash)
[](https://travis-ci.org/cespare/xxhash)
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,4 +68,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
|||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
b = b[len(d.mem):]
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
|
|
@ -16,39 +16,39 @@
|
|||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
// DI prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
ADDQ DI, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
MOVQ ·prime4v(SB), DI
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
|
|
@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
|
|||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
|
|
@ -100,16 +100,16 @@ noBlocks:
|
|||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
|
@ -117,18 +117,18 @@ wordLoop:
|
|||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
ADDQ DI, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
|
|
@ -138,19 +138,19 @@ fourByte:
|
|||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
|
|
@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
|||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
|
|
@ -199,7 +199,7 @@ blockLoop:
|
|||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
|
|
@ -208,8 +208,8 @@ blockLoop:
|
|||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is CX minus the old base pointer.
|
||||
SUBQ b_base+8(FP), CX
|
||||
MOVQ CX, ret+32(FP)
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
|
|
|||
|
|
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -6,41 +6,52 @@
package xxhash

import (
    "reflect"
    "unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.
// var b []byte
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
// bh.Len = len(s)
// bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
    var b []byte
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
    bh.Len = len(s)
    bh.Cap = len(s)
    b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
    return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
    var b []byte
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
    bh.Len = len(s)
    bh.Cap = len(s)
    return d.Write(b)
    d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
    // d.Write always returns len(s), nil.
    // Ignoring the return output and returning these fixed values buys a
    // savings of 6 in the inliner's cost model.
    return len(s), nil
}

// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
    s   string
    cap int
}
@@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
|
@@ -0,0 +1,79 @@
|
|||
package rendezvous
|
||||
|
||||
type Rendezvous struct {
|
||||
nodes map[string]int
|
||||
nstr []string
|
||||
nhash []uint64
|
||||
hash Hasher
|
||||
}
|
||||
|
||||
type Hasher func(s string) uint64
|
||||
|
||||
func New(nodes []string, hash Hasher) *Rendezvous {
|
||||
r := &Rendezvous{
|
||||
nodes: make(map[string]int, len(nodes)),
|
||||
nstr: make([]string, len(nodes)),
|
||||
nhash: make([]uint64, len(nodes)),
|
||||
hash: hash,
|
||||
}
|
||||
|
||||
for i, n := range nodes {
|
||||
r.nodes[n] = i
|
||||
r.nstr[i] = n
|
||||
r.nhash[i] = hash(n)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Rendezvous) Lookup(k string) string {
|
||||
// short-circuit if we're empty
|
||||
if len(r.nodes) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
khash := r.hash(k)
|
||||
|
||||
var midx int
|
||||
var mhash = xorshiftMult64(khash ^ r.nhash[0])
|
||||
|
||||
for i, nhash := range r.nhash[1:] {
|
||||
if h := xorshiftMult64(khash ^ nhash); h > mhash {
|
||||
midx = i + 1
|
||||
mhash = h
|
||||
}
|
||||
}
|
||||
|
||||
return r.nstr[midx]
|
||||
}
|
||||
|
||||
func (r *Rendezvous) Add(node string) {
|
||||
r.nodes[node] = len(r.nstr)
|
||||
r.nstr = append(r.nstr, node)
|
||||
r.nhash = append(r.nhash, r.hash(node))
|
||||
}
|
||||
|
||||
func (r *Rendezvous) Remove(node string) {
|
||||
// find index of node to remove
|
||||
nidx := r.nodes[node]
|
||||
|
||||
// remove from the slices
|
||||
l := len(r.nstr) - 1
|
||||
r.nstr[nidx] = r.nstr[l]
|
||||
r.nstr = r.nstr[:l]
|
||||
|
||||
r.nhash[nidx] = r.nhash[l]
|
||||
r.nhash = r.nhash[:l]
|
||||
|
||||
// update the map
|
||||
delete(r.nodes, node)

// If the removed node was not the last element, the element that moved
// into its slot needs its map index fixed up.
if nidx < len(r.nstr) {
moved := r.nstr[nidx]
r.nodes[moved] = nidx
}
|
||||
}
|
||||
|
||||
func xorshiftMult64(x uint64) uint64 {
|
||||
x ^= x >> 12 // a
|
||||
x ^= x << 25 // b
|
||||
x ^= x >> 27 // c
|
||||
return x * 2685821657736338717
|
||||
}
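The file above is the whole package: rendezvous (highest-random-weight) hashing scores every node against the key and picks the highest score, so adding or removing one node only remaps the keys that belonged to it. A minimal usage sketch, assuming the xxhash-based Hasher that go-redis itself plugs in:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
	rendezvous "github.com/dgryski/go-rendezvous"
)

func main() {
	// Any func(string) uint64 satisfies Hasher; go-redis uses xxhash.
	r := rendezvous.New([]string{"shard0", "shard1", "shard2"}, xxhash.Sum64String)
	fmt.Println(r.Lookup("user:42")) // the same key always maps to the same shard
}
```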
|
||||
|
|
@@ -0,0 +1,3 @@
|
|||
*.rdb
|
||||
testdata/*/
|
||||
.idea/
|
||||
|
|
@@ -0,0 +1,27 @@
|
|||
run:
|
||||
concurrency: 8
|
||||
deadline: 5m
|
||||
tests: false
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- funlen
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocognit
|
||||
- goconst
|
||||
- godox
|
||||
- gosec
|
||||
- maligned
|
||||
- wsl
|
||||
- gomnd
|
||||
- goerr113
|
||||
- exhaustive
|
||||
- nestif
|
||||
- nlreturn
|
||||
- exhaustivestruct
|
||||
- wrapcheck
|
||||
- errorlint
|
||||
- cyclop
|
||||
- forcetypeassert
|
||||
- forbidigo
|
||||
|
|
@@ -0,0 +1,4 @@
|
|||
semi: false
|
||||
singleQuote: true
|
||||
proseWrap: always
|
||||
printWidth: 100
|
||||
|
|
@@ -0,0 +1,149 @@
|
|||
## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
|
||||
* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
|
||||
* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
|
||||
|
||||
|
||||
|
||||
## v8.11
|
||||
|
||||
- Remove OpenTelemetry metrics.
|
||||
- Supports more redis commands and options.
|
||||
|
||||
## v8.10
|
||||
|
||||
- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
|
||||
single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
|
||||
decision:
|
||||
|
||||
- Traces become smaller and less noisy.
|
||||
- It may be costly to process those 3 extra spans for each query.
|
||||
- go-redis no longer depends on OpenTelemetry.
|
||||
|
||||
Eventually we hope to replace the information that we no longer collect with OpenTelemetry
|
||||
Metrics.
|
||||
|
||||
## v8.9
|
||||
|
||||
- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
|
||||
`WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
|
||||
|
||||
## v8.8
|
||||
|
||||
- To make updating easier, extra modules now have the same version as go-redis does. That means that
|
||||
you need to update your imports:
|
||||
|
||||
```
|
||||
github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
|
||||
github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
|
||||
```
|
||||
|
||||
## v8.5
|
||||
|
||||
- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
|
||||
struct:
|
||||
|
||||
```go
|
||||
err := rdb.HGetAll(ctx, "hash").Scan(&data)
|
||||
|
||||
err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
|
||||
```
|
||||
|
||||
- Please check [redismock](https://github.com/go-redis/redismock) by
|
||||
[monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
|
||||
|
||||
## v8
|
||||
|
||||
- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
|
||||
using `context.Context` yet, the simplest option is to define a global package variable
|
||||
`var ctx = context.TODO()` and use it when `ctx` is required.
|
||||
|
||||
- Full support for `context.Context` canceling.
|
||||
|
||||
- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
|
||||
|
||||
- Added `redisext.OpenTelemetryHook` that adds
|
||||
[Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
|
||||
|
||||
- Redis slow log support.
|
||||
|
||||
- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
|
||||
existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
|
||||
|
||||
```go
|
||||
import "github.com/golang/groupcache/consistenthash"
|
||||
|
||||
ring := redis.NewRing(&redis.RingOptions{
|
||||
NewConsistentHash: func() {
|
||||
return consistenthash.New(100, crc32.ChecksumIEEE)
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
|
||||
- `Options.MaxRetries` default value is changed from 0 to 3.
|
||||
|
||||
- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
|
||||
|
||||
## v7.3
|
||||
|
||||
- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection
|
||||
URL contains username.
|
||||
|
||||
## v7.2
|
||||
|
||||
- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
|
||||
|
||||
## v7.1
|
||||
|
||||
- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
|
||||
interface.
|
||||
|
||||
## v7
|
||||
|
||||
- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
|
||||
transactional pipeline.
|
||||
- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
|
||||
- WithContext now can not be used to create a shallow copy of the client.
|
||||
- New methods ProcessContext, DoContext, and ExecContext.
|
||||
- Client respects Context.Deadline when setting net.Conn deadline.
|
||||
- Client listens on Context.Done while waiting for a connection from the pool and returns an error
|
||||
when the context is cancelled.
|
||||
- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
|
||||
detecting reconnections.
|
||||
- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
|
||||
the time.
|
||||
- `SetLimiter` is removed; use `Options.Limiter` instead.
|
||||
- `HMSet` is deprecated as of Redis v4.
|
||||
|
||||
## v6.15
|
||||
|
||||
- Cluster and Ring pipelines process commands for each node in its own goroutine.
|
||||
|
||||
## 6.14
|
||||
|
||||
- Added Options.MinIdleConns.
|
||||
- Added Options.MaxConnAge.
|
||||
- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
|
||||
- Add Client.Do to simplify creating custom commands.
|
||||
- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
|
||||
- Lower memory usage.
|
||||
|
||||
## v6.13
|
||||
|
||||
- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
|
||||
`HashReplicas = 1000` for better keys distribution between shards.
|
||||
- Cluster client was optimized to use much less memory when reloading cluster state.
|
||||
- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout
|
||||
occurs. In most cases it is recommended to use PubSub.Channel instead.
|
||||
- Dialer.KeepAlive is set to 5 minutes by default.
|
||||
|
||||
## v6.12
|
||||
|
||||
- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis
|
||||
Servers that don't have cluster mode enabled. See
|
||||
https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
|
||||
|
|
@@ -0,0 +1,25 @@
|
|||
Copyright (c) 2013 The github.com/go-redis/redis Authors.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@@ -0,0 +1,35 @@
|
|||
PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||
|
||||
test: testdeps
|
||||
go test ./...
|
||||
go test ./... -short -race
|
||||
go test ./... -run=NONE -bench=. -benchmem
|
||||
env GOOS=linux GOARCH=386 go test ./...
|
||||
go vet
|
||||
|
||||
testdeps: testdata/redis/src/redis-server
|
||||
|
||||
bench: testdeps
|
||||
go test ./... -test.run=NONE -test.bench=. -test.benchmem
|
||||
|
||||
.PHONY: all test testdeps bench
|
||||
|
||||
testdata/redis:
|
||||
mkdir -p $@
|
||||
wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
|
||||
|
||||
testdata/redis/src/redis-server: testdata/redis
|
||||
cd $< && make all
|
||||
|
||||
fmt:
|
||||
gofmt -w -s ./
|
||||
goimports -w -local github.com/go-redis/redis ./
|
||||
|
||||
go_mod_tidy:
|
||||
go get -u && go mod tidy
|
||||
set -e; for dir in $(PACKAGE_DIRS); do \
|
||||
echo "go mod tidy in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
go get -u && \
|
||||
go mod tidy); \
|
||||
done
|
||||
|
|
@@ -0,0 +1,178 @@
|
|||
<p align="center">
|
||||
<a href="https://uptrace.dev/?utm_source=gh-redis&utm_campaign=gh-redis-banner1">
|
||||
<img src="https://raw.githubusercontent.com/uptrace/roadmap/master/banner1.png" alt="All-in-one tool to optimize performance and monitor errors & logs">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
# Redis client for Golang
|
||||
|
||||

|
||||
[](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
|
||||
[](https://redis.uptrace.dev/)
|
||||
[](https://discord.gg/rWtp5Aj)
|
||||
|
||||
- To ask questions, join [Discord](https://discord.gg/rWtp5Aj) or use
|
||||
[Discussions](https://github.com/go-redis/redis/discussions).
|
||||
- [Newsletter](https://blog.uptrace.dev/pages/newsletter.html) to get the latest updates.
|
||||
- [Documentation](https://redis.uptrace.dev)
|
||||
- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
|
||||
- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
|
||||
- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
|
||||
|
||||
Other projects you may like:
|
||||
|
||||
- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
|
||||
- [treemux](https://github.com/vmihailenco/treemux) - high-speed, flexible, tree-based HTTP router
|
||||
for Go.
|
||||
|
||||
## Ecosystem
|
||||
|
||||
- [Redis Mock](https://github.com/go-redis/redismock).
|
||||
- [Distributed Locks](https://github.com/bsm/redislock).
|
||||
- [Redis Cache](https://github.com/go-redis/cache).
|
||||
- [Rate limiting](https://github.com/go-redis/redis_rate).
|
||||
|
||||
## Features
|
||||
|
||||
- Redis 3 commands except QUIT, MONITOR, and SYNC.
|
||||
- Automatic connection pooling with
|
||||
[circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
|
||||
- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
|
||||
- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
|
||||
- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
|
||||
[TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
|
||||
- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
|
||||
- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
|
||||
- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
|
||||
- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
|
||||
- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
|
||||
without using cluster mode and Redis Sentinel.
|
||||
- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
|
||||
- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
|
||||
|
||||
## Installation
|
||||
|
||||
go-redis supports the two most recent Go versions and requires a Go version with
|
||||
[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
|
||||
module:
|
||||
|
||||
```shell
|
||||
go mod init github.com/my/repo
|
||||
```
|
||||
|
||||
And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
|
||||
|
||||
```shell
|
||||
go get github.com/go-redis/redis/v8
|
||||
```
|
||||
|
||||
## Quickstart
|
||||
|
||||
```go
|
||||
import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func ExampleClient() {
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Addr: "localhost:6379",
|
||||
Password: "", // no password set
|
||||
DB: 0, // use default DB
|
||||
})
|
||||
|
||||
err := rdb.Set(ctx, "key", "value", 0).Err()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
val, err := rdb.Get(ctx, "key").Result()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println("key", val)
|
||||
|
||||
val2, err := rdb.Get(ctx, "key2").Result()
|
||||
if err == redis.Nil {
|
||||
fmt.Println("key2 does not exist")
|
||||
} else if err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Println("key2", val2)
|
||||
}
|
||||
// Output: key value
|
||||
// key2 does not exist
|
||||
}
|
||||
```
|
||||
|
||||
## Look and feel
|
||||
|
||||
Some corner cases:
|
||||
|
||||
```go
|
||||
// SET key value EX 10 NX
|
||||
set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
|
||||
|
||||
// SET key value keepttl NX
|
||||
set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
|
||||
|
||||
// SORT list LIMIT 0 2 ASC
|
||||
vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
|
||||
|
||||
// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
|
||||
vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
|
||||
Min: "-inf",
|
||||
Max: "+inf",
|
||||
Offset: 0,
|
||||
Count: 2,
|
||||
}).Result()
|
||||
|
||||
// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
|
||||
vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
|
||||
Keys: []string{"zset1", "zset2"},
|
||||
Weights: []int64{2, 3},
|
||||
}).Result()
|
||||
|
||||
// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
|
||||
vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
|
||||
|
||||
// custom command
|
||||
res, err := rdb.Do(ctx, "set", "key", "value").Result()
|
||||
```
|
||||
|
||||
## Run the test
|
||||
|
||||
go-redis will start a redis-server and run the test cases.
|
||||
|
||||
The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
|
||||
|
||||
```
|
||||
var (
|
||||
redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
|
||||
redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
|
||||
)
|
||||
```
|
||||
|
||||
For local testing, you can change the variables to refer to your local files, or create a soft link
|
||||
to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
|
||||
|
||||
```
|
||||
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
|
||||
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
|
||||
```
|
||||
|
||||
Lastly, run:
|
||||
|
||||
```
|
||||
go test
|
||||
```
|
||||
|
||||
## Contributors
|
||||
|
||||
Thanks to all the people who already contributed!
|
||||
|
||||
<a href="https://github.com/go-redis/redis/graphs/contributors">
|
||||
<img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
|
||||
</a>
|
||||
|
|
@@ -0,0 +1,15 @@
|
|||
# Releasing
|
||||
|
||||
1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
|
||||
|
||||
```shell
|
||||
TAG=v1.0.0 ./scripts/release.sh
|
||||
```
|
||||
|
||||
2. Open a pull request and wait for the build to finish.
|
||||
|
||||
3. Merge the pull request and run `tag.sh` to create tags for packages:
|
||||
|
||||
```shell
|
||||
TAG=v1.0.0 ./scripts/tag.sh
|
||||
```
|
||||
File diff suppressed because it is too large
|
|
@@ -0,0 +1,109 @@
|
|||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
|
||||
cmd := NewIntCmd(ctx, "dbsize")
|
||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
|
||||
var size int64
|
||||
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
|
||||
n, err := master.DBSize(ctx).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
atomic.AddInt64(&size, n)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
cmd.SetErr(err)
|
||||
} else {
|
||||
cmd.val = size
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
|
||||
cmd := NewStringCmd(ctx, "script", "load", script)
|
||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
|
||||
mu := &sync.Mutex{}
|
||||
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
|
||||
val, err := shard.ScriptLoad(ctx, script).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
if cmd.Val() == "" {
|
||||
cmd.val = val
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
cmd.SetErr(err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
|
||||
cmd := NewStatusCmd(ctx, "script", "flush")
|
||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
|
||||
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
|
||||
return shard.ScriptFlush(ctx).Err()
|
||||
})
|
||||
if err != nil {
|
||||
cmd.SetErr(err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
|
||||
args := make([]interface{}, 2+len(hashes))
|
||||
args[0] = "script"
|
||||
args[1] = "exists"
|
||||
for i, hash := range hashes {
|
||||
args[2+i] = hash
|
||||
}
|
||||
cmd := NewBoolSliceCmd(ctx, args...)
|
||||
|
||||
result := make([]bool, len(hashes))
|
||||
for i := range result {
|
||||
result[i] = true
|
||||
}
|
||||
|
||||
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
|
||||
mu := &sync.Mutex{}
|
||||
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
|
||||
val, err := shard.ScriptExists(ctx, hashes...).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
for i, v := range val {
|
||||
result[i] = result[i] && v
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
cmd.SetErr(err)
|
||||
} else {
|
||||
cmd.val = result
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return cmd
|
||||
}
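These helpers fan one logical command out across the cluster: DBSize sums the per-master key counts, ScriptLoad pushes a script to every shard, and ScriptExists ANDs the per-shard answers. A usage sketch against the public API; the addresses are placeholders and `ctx` follows the Quickstart convention:

```go
// Placeholder addresses; any reachable cluster behaves the same way.
rdb := redis.NewClusterClient(&redis.ClusterOptions{
	Addrs: []string{":7000", ":7001", ":7002"},
})

// Fans out to every master and sums the per-node key counts.
n, err := rdb.DBSize(ctx).Result()
if err != nil {
	panic(err)
}
fmt.Println("keys across all masters:", n)
```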
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@@ -0,0 +1,4 @@
|
|||
/*
|
||||
Package redis implements a Redis client.
|
||||
*/
|
||||
package redis
|
||||
|
|
@@ -0,0 +1,144 @@
|
|||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/go-redis/redis/v8/internal/pool"
|
||||
"github.com/go-redis/redis/v8/internal/proto"
|
||||
)
|
||||
|
||||
// ErrClosed is returned when any operation is performed on a closed client.
|
||||
var ErrClosed = pool.ErrClosed
|
||||
|
||||
type Error interface {
|
||||
error
|
||||
|
||||
// RedisError is a no-op function but
|
||||
// serves to distinguish types that are Redis
|
||||
// errors from ordinary errors: a type is a
|
||||
// Redis error if it has a RedisError method.
|
||||
RedisError()
|
||||
}
|
||||
|
||||
var _ Error = proto.RedisError("")
|
||||
|
||||
func shouldRetry(err error, retryTimeout bool) bool {
|
||||
switch err {
|
||||
case io.EOF, io.ErrUnexpectedEOF:
|
||||
return true
|
||||
case nil, context.Canceled, context.DeadlineExceeded:
|
||||
return false
|
||||
}
|
||||
|
||||
if v, ok := err.(timeoutError); ok {
|
||||
if v.Timeout() {
|
||||
return retryTimeout
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
s := err.Error()
|
||||
if s == "ERR max number of clients reached" {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(s, "LOADING ") {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(s, "READONLY ") {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(s, "CLUSTERDOWN ") {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(s, "TRYAGAIN ") {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func isRedisError(err error) bool {
|
||||
_, ok := err.(proto.RedisError)
|
||||
return ok
|
||||
}
|
||||
|
||||
func isBadConn(err error, allowTimeout bool, addr string) bool {
|
||||
switch err {
|
||||
case nil:
|
||||
return false
|
||||
case context.Canceled, context.DeadlineExceeded:
|
||||
return true
|
||||
}
|
||||
|
||||
if isRedisError(err) {
|
||||
switch {
|
||||
case isReadOnlyError(err):
|
||||
// Close connections in read only state in case domain addr is used
|
||||
// and domain resolves to a different Redis Server. See #790.
|
||||
return true
|
||||
case isMovedSameConnAddr(err, addr):
|
||||
// Close connections when we are asked to move to the same addr
|
||||
// of the connection. Force a DNS resolution when all connections
|
||||
// of the pool are recycled
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if allowTimeout {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
||||
return !netErr.Temporary()
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func isMovedError(err error) (moved bool, ask bool, addr string) {
|
||||
if !isRedisError(err) {
|
||||
return
|
||||
}
|
||||
|
||||
s := err.Error()
|
||||
switch {
|
||||
case strings.HasPrefix(s, "MOVED "):
|
||||
moved = true
|
||||
case strings.HasPrefix(s, "ASK "):
|
||||
ask = true
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
ind := strings.LastIndex(s, " ")
|
||||
if ind == -1 {
|
||||
return false, false, ""
|
||||
}
|
||||
addr = s[ind+1:]
|
||||
return
|
||||
}
|
||||
|
||||
func isLoadingError(err error) bool {
|
||||
return strings.HasPrefix(err.Error(), "LOADING ")
|
||||
}
|
||||
|
||||
func isReadOnlyError(err error) bool {
|
||||
return strings.HasPrefix(err.Error(), "READONLY ")
|
||||
}
|
||||
|
||||
func isMovedSameConnAddr(err error, addr string) bool {
|
||||
redisError := err.Error()
|
||||
if !strings.HasPrefix(redisError, "MOVED ") {
|
||||
return false
|
||||
}
|
||||
return strings.HasSuffix(redisError, addr)
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type timeoutError interface {
|
||||
Timeout() bool
|
||||
}
|
||||
|
|
@@ -0,0 +1,11 @@
|
|||
module github.com/go-redis/redis/v8
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/onsi/ginkgo v1.16.4
|
||||
github.com/onsi/gomega v1.16.0
|
||||
)
|
||||
|
|
@@ -0,0 +1,100 @@
|
|||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
|
||||
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
|
|
@@ -0,0 +1,56 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
func AppendArg(b []byte, v interface{}) []byte {
|
||||
switch v := v.(type) {
|
||||
case nil:
|
||||
return append(b, "<nil>"...)
|
||||
case string:
|
||||
return appendUTF8String(b, Bytes(v))
|
||||
case []byte:
|
||||
return appendUTF8String(b, v)
|
||||
case int:
|
||||
return strconv.AppendInt(b, int64(v), 10)
|
||||
case int8:
|
||||
return strconv.AppendInt(b, int64(v), 10)
|
||||
case int16:
|
||||
return strconv.AppendInt(b, int64(v), 10)
|
||||
case int32:
|
||||
return strconv.AppendInt(b, int64(v), 10)
|
||||
case int64:
|
||||
return strconv.AppendInt(b, v, 10)
|
||||
case uint:
|
||||
return strconv.AppendUint(b, uint64(v), 10)
|
||||
case uint8:
|
||||
return strconv.AppendUint(b, uint64(v), 10)
|
||||
case uint16:
|
||||
return strconv.AppendUint(b, uint64(v), 10)
|
||||
case uint32:
|
||||
return strconv.AppendUint(b, uint64(v), 10)
|
||||
case uint64:
|
||||
return strconv.AppendUint(b, v, 10)
|
||||
case float32:
|
||||
// bitSize 32 keeps float32 values at their own precision.
return strconv.AppendFloat(b, float64(v), 'f', -1, 32)
|
||||
case float64:
|
||||
return strconv.AppendFloat(b, v, 'f', -1, 64)
|
||||
case bool:
|
||||
if v {
|
||||
return append(b, "true"...)
|
||||
}
|
||||
return append(b, "false"...)
|
||||
case time.Time:
|
||||
return v.AppendFormat(b, time.RFC3339Nano)
|
||||
default:
|
||||
return append(b, fmt.Sprint(v)...)
|
||||
}
|
||||
}
|
||||
|
||||
func appendUTF8String(dst []byte, src []byte) []byte {
|
||||
dst = append(dst, src...)
|
||||
return dst
|
||||
}
|
||||
|
|
@@ -0,0 +1,78 @@
|
|||
package hashtag
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/go-redis/redis/v8/internal/rand"
|
||||
)
|
||||
|
||||
const slotNumber = 16384
|
||||
|
||||
// CRC16 implementation according to CCITT standards.
|
||||
// Copyright 2001-2010 Georges Menie (www.menie.org)
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
|
||||
var crc16tab = [256]uint16{
|
||||
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
|
||||
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
|
||||
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
|
||||
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
|
||||
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
|
||||
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
|
||||
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
|
||||
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
|
||||
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
|
||||
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
|
||||
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
|
||||
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
|
||||
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
|
||||
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
|
||||
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
|
||||
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
|
||||
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
|
||||
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
|
||||
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
|
||||
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
|
||||
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
|
||||
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
|
||||
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
|
||||
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
|
||||
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
|
||||
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
|
||||
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
|
||||
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
|
||||
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
|
||||
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
|
||||
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
|
||||
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
|
||||
}
|
||||
|
||||
func Key(key string) string {
|
||||
if s := strings.IndexByte(key, '{'); s > -1 {
|
||||
if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
|
||||
return key[s+1 : s+e+1]
|
||||
}
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func RandomSlot() int {
|
||||
return rand.Intn(slotNumber)
|
||||
}
|
||||
|
||||
// Slot returns a consistent slot number between 0 and 16383
|
||||
// for any given string key.
|
||||
func Slot(key string) int {
|
||||
if key == "" {
|
||||
return RandomSlot()
|
||||
}
|
||||
key = Key(key)
|
||||
return int(crc16sum(key)) % slotNumber
|
||||
}
|
||||
|
||||
func crc16sum(key string) (crc uint16) {
|
||||
for i := 0; i < len(key); i++ {
|
||||
crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
|
||||
}
|
||||
return
|
||||
}
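The `{...}` hash-tag convention implemented above is what lets callers pin related keys to one cluster slot: only the text inside the first balanced braces is fed to CRC16. A short sketch (hashtag is an internal package, so this is illustrative rather than directly importable):

```go
// Both keys carry the tag "42", so they hash to the same slot and can
// take part in the same multi-key command on a cluster.
hashtag.Key("user:{42}:profile")  // "42"
hashtag.Slot("user:{42}:profile") // == hashtag.Slot("user:{42}:orders")
```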
|
||||
|
|
@@ -0,0 +1,201 @@
|
|||
package hscan
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// decoderFunc represents decoding functions for default built-in types.
|
||||
type decoderFunc func(reflect.Value, string) error
|
||||
|
||||
var (
|
||||
// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
|
||||
decoders = []decoderFunc{
|
||||
reflect.Bool: decodeBool,
|
||||
reflect.Int: decodeInt,
|
||||
reflect.Int8: decodeInt8,
|
||||
reflect.Int16: decodeInt16,
|
||||
reflect.Int32: decodeInt32,
|
||||
reflect.Int64: decodeInt64,
|
||||
reflect.Uint: decodeUint,
|
||||
reflect.Uint8: decodeUint8,
|
||||
reflect.Uint16: decodeUint16,
|
||||
reflect.Uint32: decodeUint32,
|
||||
reflect.Uint64: decodeUint64,
|
||||
reflect.Float32: decodeFloat32,
|
||||
reflect.Float64: decodeFloat64,
|
||||
reflect.Complex64: decodeUnsupported,
|
||||
reflect.Complex128: decodeUnsupported,
|
||||
reflect.Array: decodeUnsupported,
|
||||
reflect.Chan: decodeUnsupported,
|
||||
reflect.Func: decodeUnsupported,
|
||||
reflect.Interface: decodeUnsupported,
|
||||
reflect.Map: decodeUnsupported,
|
||||
reflect.Ptr: decodeUnsupported,
|
||||
reflect.Slice: decodeSlice,
|
||||
reflect.String: decodeString,
|
||||
reflect.Struct: decodeUnsupported,
|
||||
reflect.UnsafePointer: decodeUnsupported,
|
||||
}
|
||||
|
||||
// Global map of struct field specs that is populated once for every new
|
||||
// struct type that is scanned. This caches the field types and the corresponding
|
||||
// decoder functions to avoid iterating through struct fields on subsequent scans.
|
||||
globalStructMap = newStructMap()
|
||||
)
|
||||
|
||||
func Struct(dst interface{}) (StructValue, error) {
|
||||
v := reflect.ValueOf(dst)
|
||||
|
||||
// The destination to scan into should be a struct pointer.
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
|
||||
}
|
||||
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
|
||||
}
|
||||
|
||||
return StructValue{
|
||||
spec: globalStructMap.get(v.Type()),
|
||||
value: v,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Scan scans the results from a key-value Redis map result set to a destination struct.
|
||||
// The Redis keys are matched to the struct's field with the `redis` tag.
|
||||
func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
|
||||
if len(keys) != len(vals) {
|
||||
return errors.New("args should have the same number of keys and vals")
|
||||
}
|
||||
|
||||
strct, err := Struct(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterate through the (key, value) sequence.
|
||||
for i := 0; i < len(vals); i++ {
|
||||
key, ok := keys[i].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
val, ok := vals[i].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := strct.Scan(key, val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeBool(f reflect.Value, s string) error {
|
||||
b, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeInt8(f reflect.Value, s string) error {
|
||||
return decodeNumber(f, s, 8)
|
||||
}
|
||||
|
||||
func decodeInt16(f reflect.Value, s string) error {
|
||||
return decodeNumber(f, s, 16)
|
||||
}
|
||||
|
||||
func decodeInt32(f reflect.Value, s string) error {
|
||||
return decodeNumber(f, s, 32)
|
||||
}
|
||||
|
||||
func decodeInt64(f reflect.Value, s string) error {
|
||||
return decodeNumber(f, s, 64)
|
||||
}
|
||||
|
||||
func decodeInt(f reflect.Value, s string) error {
|
||||
return decodeNumber(f, s, 0)
|
||||
}
|
||||
|
||||
func decodeNumber(f reflect.Value, s string, bitSize int) error {
|
||||
v, err := strconv.ParseInt(s, 10, bitSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetInt(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeUint8(f reflect.Value, s string) error {
|
||||
return decodeUnsignedNumber(f, s, 8)
|
||||
}
|
||||
|
||||
func decodeUint16(f reflect.Value, s string) error {
|
||||
return decodeUnsignedNumber(f, s, 16)
|
||||
}
|
||||
|
||||
func decodeUint32(f reflect.Value, s string) error {
|
||||
return decodeUnsignedNumber(f, s, 32)
|
||||
}
|
||||
|
||||
func decodeUint64(f reflect.Value, s string) error {
|
||||
return decodeUnsignedNumber(f, s, 64)
|
||||
}
|
||||
|
||||
func decodeUint(f reflect.Value, s string) error {
|
||||
return decodeUnsignedNumber(f, s, 0)
|
||||
}
|
||||
|
||||
func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
|
||||
v, err := strconv.ParseUint(s, 10, bitSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetUint(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeFloat32(f reflect.Value, s string) error {
|
||||
v, err := strconv.ParseFloat(s, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetFloat(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Although float64 is the default, we define the decoder explicitly.
|
||||
func decodeFloat64(f reflect.Value, s string) error {
|
||||
v, err := strconv.ParseFloat(s, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetFloat(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeString(f reflect.Value, s string) error {
|
||||
f.SetString(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeSlice(f reflect.Value, s string) error {
|
||||
// []byte slice ([]uint8).
|
||||
if f.Type().Elem().Kind() == reflect.Uint8 {
|
||||
f.SetBytes([]byte(s))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeUnsupported(v reflect.Value, s string) error {
|
||||
return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
|
||||
}
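This decoder table backs the public `Scan` helpers mentioned in the changelog above: Redis hash fields are matched to struct fields by their `redis` tag and decoded with the per-kind functions. A sketch using the public API, assuming a hash whose fields match the tags:

```go
type Profile struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
	Paid bool   `redis:"paid"`
}

// HGetAll returns the hash's field/value pairs; Scan decodes them into
// the tagged struct fields via the hscan machinery above.
var p Profile
if err := rdb.HGetAll(ctx, "user:42").Scan(&p); err != nil {
	panic(err)
}
```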
|
||||
|
|
@@ -0,0 +1,93 @@
|
|||
package hscan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// structMap contains the map of struct fields for target structs
|
||||
// indexed by the struct type.
|
||||
type structMap struct {
|
||||
m sync.Map
|
||||
}
|
||||
|
||||
func newStructMap() *structMap {
|
||||
return new(structMap)
|
||||
}
|
||||
|
||||
func (s *structMap) get(t reflect.Type) *structSpec {
|
||||
if v, ok := s.m.Load(t); ok {
|
||||
return v.(*structSpec)
|
||||
}
|
||||
|
||||
spec := newStructSpec(t, "redis")
|
||||
s.m.Store(t, spec)
|
||||
return spec
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// structSpec contains the list of all fields in a target struct.
|
||||
type structSpec struct {
|
||||
m map[string]*structField
|
||||
}
|
||||
|
||||
func (s *structSpec) set(tag string, sf *structField) {
|
||||
s.m[tag] = sf
|
||||
}
|
||||
|
||||
func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
|
||||
numField := t.NumField()
|
||||
out := &structSpec{
|
||||
m: make(map[string]*structField, numField),
|
||||
}
|
||||
|
||||
for i := 0; i < numField; i++ {
|
||||
f := t.Field(i)
|
||||
|
||||
tag := f.Tag.Get(fieldTag)
|
||||
if tag == "" || tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
tag = strings.Split(tag, ",")[0]
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use the built-in decoder.
|
||||
out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// structField represents a single field in a target struct.
|
||||
type structField struct {
|
||||
index int
|
||||
fn decoderFunc
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type StructValue struct {
|
||||
spec *structSpec
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
func (s StructValue) Scan(key string, value string) error {
|
||||
field, ok := s.spec.m[key]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := field.fn(s.value.Field(field.index), value); err != nil {
|
||||
t := s.value.Type()
|
||||
return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
|
||||
value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@@ -0,0 +1,29 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis/v8/internal/rand"
|
||||
)
|
||||
|
||||
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
|
||||
if retry < 0 {
|
||||
panic("not reached")
|
||||
}
|
||||
if minBackoff == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
d := minBackoff << uint(retry)
|
||||
if d < minBackoff {
|
||||
return maxBackoff
|
||||
}
|
||||
|
||||
d = minBackoff + time.Duration(rand.Int63n(int64(d)))
|
||||
|
||||
if d > maxBackoff || d < minBackoff {
|
||||
d = maxBackoff
|
||||
}
|
||||
|
||||
return d
|
||||
}
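RetryBackoff is capped exponential backoff with full jitter: the window doubles with each retry (`minBackoff << retry`), the delay is drawn uniformly from `[minBackoff, minBackoff+window)`, and a shift overflow or anything past `maxBackoff` clamps to `maxBackoff`. A worked sketch, written as if called from inside this internal package; the 8ms/512ms bounds are assumed values for illustration:

```go
// retry 0: window 8ms   -> delay in [8ms, 16ms)
// retry 3: window 64ms  -> delay in [8ms, 72ms)
// retry 6: window 512ms -> delay in [8ms, 520ms), clamped to <= 512ms
for retry := 0; retry < 7; retry++ {
	fmt.Println(retry, RetryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond))
}
```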
|
||||
|
|
@@ -0,0 +1,26 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
type Logging interface {
|
||||
Printf(ctx context.Context, format string, v ...interface{})
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
log *log.Logger
|
||||
}
|
||||
|
||||
func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
|
||||
_ = l.log.Output(2, fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
// Logger calls Output to print to stderr.
|
||||
// Arguments are handled in the manner of fmt.Print.
|
||||
var Logger Logging = &logger{
|
||||
log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
|
||||
}
|
||||
|
|
@@ -0,0 +1,60 @@
|
|||
/*
|
||||
Copyright 2014 The Camlistore Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// A Once will perform a successful action exactly once.
|
||||
//
|
||||
// Unlike a sync.Once, this Once's func returns an error
|
||||
// and is re-armed on failure.
|
||||
type Once struct {
|
||||
m sync.Mutex
|
||||
done uint32
|
||||
}
|
||||
|
||||
// Do calls the function f if and only if Do has not been invoked
|
||||
// without error for this instance of Once. In other words, given
|
||||
// var once Once
|
||||
// if once.Do(f) is called multiple times, only the first call will
|
||||
// invoke f, even if f has a different value in each invocation unless
|
||||
// f returns an error. A new instance of Once is required for each
|
||||
// function to execute.
|
||||
//
|
||||
// Do is intended for initialization that must be run exactly once. Since f
|
||||
// is niladic, it may be necessary to use a function literal to capture the
|
||||
// arguments to a function to be invoked by Do:
|
||||
// err := config.once.Do(func() error { return config.init(filename) })
|
||||
func (o *Once) Do(f func() error) error {
|
||||
if atomic.LoadUint32(&o.done) == 1 {
|
||||
return nil
|
||||
}
|
||||
// Slow-path.
|
||||
o.m.Lock()
|
||||
defer o.m.Unlock()
|
||||
var err error
|
||||
if o.done == 0 {
|
||||
err = f()
|
||||
if err == nil {
|
||||
atomic.StoreUint32(&o.done, 1)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
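Illustrative only, not part of the commit: a minimal in-package test sketch demonstrating the re-arm behavior that distinguishes this Once from sync.Once.

package internal

import (
	"errors"
	"testing"
)

func TestOnceRearmsOnFailure(t *testing.T) {
	var once Once
	calls := 0
	f := func() error {
		calls++
		if calls == 1 {
			return errors.New("transient failure")
		}
		return nil
	}

	_ = once.Do(f) // invokes f; f fails, so Once stays armed
	_ = once.Do(f) // invokes f again; f succeeds, Once latches
	_ = once.Do(f) // no-op from here on

	if calls != 2 {
		t.Fatalf("expected f to run twice, got %d", calls)
	}
}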
@@ -0,0 +1,121 @@
package pool

import (
	"bufio"
	"context"
	"net"
	"sync/atomic"
	"time"

	"github.com/go-redis/redis/v8/internal/proto"
)

var noDeadline = time.Time{}

type Conn struct {
	usedAt  int64 // atomic
	netConn net.Conn

	rd *proto.Reader
	bw *bufio.Writer
	wr *proto.Writer

	Inited    bool
	pooled    bool
	createdAt time.Time
}

func NewConn(netConn net.Conn) *Conn {
	cn := &Conn{
		netConn:   netConn,
		createdAt: time.Now(),
	}
	cn.rd = proto.NewReader(netConn)
	cn.bw = bufio.NewWriter(netConn)
	cn.wr = proto.NewWriter(cn.bw)
	cn.SetUsedAt(time.Now())
	return cn
}

func (cn *Conn) UsedAt() time.Time {
	unix := atomic.LoadInt64(&cn.usedAt)
	return time.Unix(unix, 0)
}

func (cn *Conn) SetUsedAt(tm time.Time) {
	atomic.StoreInt64(&cn.usedAt, tm.Unix())
}

func (cn *Conn) SetNetConn(netConn net.Conn) {
	cn.netConn = netConn
	cn.rd.Reset(netConn)
	cn.bw.Reset(netConn)
}

func (cn *Conn) Write(b []byte) (int, error) {
	return cn.netConn.Write(b)
}

func (cn *Conn) RemoteAddr() net.Addr {
	if cn.netConn != nil {
		return cn.netConn.RemoteAddr()
	}
	return nil
}

func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
	if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
		return err
	}
	return fn(cn.rd)
}

func (cn *Conn) WithWriter(
	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
	if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
		return err
	}

	if cn.bw.Buffered() > 0 {
		cn.bw.Reset(cn.netConn)
	}

	if err := fn(cn.wr); err != nil {
		return err
	}

	return cn.bw.Flush()
}

func (cn *Conn) Close() error {
	return cn.netConn.Close()
}

func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
	tm := time.Now()
	cn.SetUsedAt(tm)

	if timeout > 0 {
		tm = tm.Add(timeout)
	}

	if ctx != nil {
		deadline, ok := ctx.Deadline()
		if ok {
			if timeout == 0 {
				return deadline
			}
			if deadline.Before(tm) {
				return deadline
			}
			return tm
		}
	}

	if timeout > 0 {
		return tm
	}

	return noDeadline
}
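Illustrative only, not part of the commit: how in-module callers drive a Conn, pairing WithWriter/WithReader with the proto helpers. Note the socket deadline is the earlier of now+timeout and any ctx deadline (see deadline above). The sendPing helper is hypothetical.

// sendPing writes PING with a 3s write deadline, then reads the status
// reply with a 3s read deadline.
func sendPing(ctx context.Context, cn *pool.Conn) (string, error) {
	err := cn.WithWriter(ctx, 3*time.Second, func(wr *proto.Writer) error {
		return wr.WriteArgs([]interface{}{"ping"})
	})
	if err != nil {
		return "", err
	}

	var status string
	err = cn.WithReader(ctx, 3*time.Second, func(rd *proto.Reader) error {
		var rerr error
		status, rerr = rd.ReadString()
		return rerr
	})
	return status, err
}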
@@ -0,0 +1,557 @@
package pool

import (
	"context"
	"errors"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/go-redis/redis/v8/internal"
)

var (
	// ErrClosed is returned when any operation is performed on a closed client.
	ErrClosed = errors.New("redis: client is closed")

	// ErrPoolTimeout is returned when the pool times out waiting to hand out a connection.
	ErrPoolTimeout = errors.New("redis: connection pool timeout")
)

var timers = sync.Pool{
	New: func() interface{} {
		t := time.NewTimer(time.Hour)
		t.Stop()
		return t
	},
}

// Stats contains pool state information and accumulated stats.
type Stats struct {
	Hits     uint32 // number of times free connection was found in the pool
	Misses   uint32 // number of times free connection was NOT found in the pool
	Timeouts uint32 // number of times a wait timeout occurred

	TotalConns uint32 // number of total connections in the pool
	IdleConns  uint32 // number of idle connections in the pool
	StaleConns uint32 // number of stale connections removed from the pool
}

type Pooler interface {
	NewConn(context.Context) (*Conn, error)
	CloseConn(*Conn) error

	Get(context.Context) (*Conn, error)
	Put(context.Context, *Conn)
	Remove(context.Context, *Conn, error)

	Len() int
	IdleLen() int
	Stats() *Stats

	Close() error
}

type Options struct {
	Dialer  func(context.Context) (net.Conn, error)
	OnClose func(*Conn) error

	PoolFIFO           bool
	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
}

type lastDialErrorWrap struct {
	err error
}

type ConnPool struct {
	opt *Options

	dialErrorsNum uint32 // atomic

	lastDialError atomic.Value

	queue chan struct{}

	connsMu      sync.Mutex
	conns        []*Conn
	idleConns    []*Conn
	poolSize     int
	idleConnsLen int

	stats Stats

	_closed  uint32 // atomic
	closedCh chan struct{}
}

var _ Pooler = (*ConnPool)(nil)

func NewConnPool(opt *Options) *ConnPool {
	p := &ConnPool{
		opt: opt,

		queue:     make(chan struct{}, opt.PoolSize),
		conns:     make([]*Conn, 0, opt.PoolSize),
		idleConns: make([]*Conn, 0, opt.PoolSize),
		closedCh:  make(chan struct{}),
	}

	p.connsMu.Lock()
	p.checkMinIdleConns()
	p.connsMu.Unlock()

	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
		go p.reaper(opt.IdleCheckFrequency)
	}

	return p
}

func (p *ConnPool) checkMinIdleConns() {
	if p.opt.MinIdleConns == 0 {
		return
	}
	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
		p.poolSize++
		p.idleConnsLen++

		go func() {
			err := p.addIdleConn()
			if err != nil && err != ErrClosed {
				p.connsMu.Lock()
				p.poolSize--
				p.idleConnsLen--
				p.connsMu.Unlock()
			}
		}()
	}
}

func (p *ConnPool) addIdleConn() error {
	cn, err := p.dialConn(context.TODO(), true)
	if err != nil {
		return err
	}

	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	// It is not allowed to add new connections to the closed connection pool.
	if p.closed() {
		_ = cn.Close()
		return ErrClosed
	}

	p.conns = append(p.conns, cn)
	p.idleConns = append(p.idleConns, cn)
	return nil
}

func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.newConn(ctx, false)
}

func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
	cn, err := p.dialConn(ctx, pooled)
	if err != nil {
		return nil, err
	}

	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	// It is not allowed to add new connections to the closed connection pool.
	if p.closed() {
		_ = cn.Close()
		return nil, ErrClosed
	}

	p.conns = append(p.conns, cn)
	if pooled {
		// If pool is full remove the cn on next Put.
		if p.poolSize >= p.opt.PoolSize {
			cn.pooled = false
		} else {
			p.poolSize++
		}
	}

	return cn, nil
}

func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}

	if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
		return nil, p.getLastDialError()
	}

	netConn, err := p.opt.Dialer(ctx)
	if err != nil {
		p.setLastDialError(err)
		if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
			go p.tryDial()
		}
		return nil, err
	}

	cn := NewConn(netConn)
	cn.pooled = pooled
	return cn, nil
}

func (p *ConnPool) tryDial() {
	for {
		if p.closed() {
			return
		}

		conn, err := p.opt.Dialer(context.Background())
		if err != nil {
			p.setLastDialError(err)
			time.Sleep(time.Second)
			continue
		}

		atomic.StoreUint32(&p.dialErrorsNum, 0)
		_ = conn.Close()
		return
	}
}

func (p *ConnPool) setLastDialError(err error) {
	p.lastDialError.Store(&lastDialErrorWrap{err: err})
}

func (p *ConnPool) getLastDialError() error {
	err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
	if err != nil {
		return err.err
	}
	return nil
}

// Get returns an existing connection from the pool or creates a new one.
func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}

	if err := p.waitTurn(ctx); err != nil {
		return nil, err
	}

	for {
		p.connsMu.Lock()
		cn, err := p.popIdle()
		p.connsMu.Unlock()

		if err != nil {
			return nil, err
		}

		if cn == nil {
			break
		}

		if p.isStaleConn(cn) {
			_ = p.CloseConn(cn)
			continue
		}

		atomic.AddUint32(&p.stats.Hits, 1)
		return cn, nil
	}

	atomic.AddUint32(&p.stats.Misses, 1)

	newcn, err := p.newConn(ctx, true)
	if err != nil {
		p.freeTurn()
		return nil, err
	}

	return newcn, nil
}

func (p *ConnPool) getTurn() {
	p.queue <- struct{}{}
}

func (p *ConnPool) waitTurn(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	select {
	case p.queue <- struct{}{}:
		return nil
	default:
	}

	timer := timers.Get().(*time.Timer)
	timer.Reset(p.opt.PoolTimeout)

	select {
	case <-ctx.Done():
		if !timer.Stop() {
			<-timer.C
		}
		timers.Put(timer)
		return ctx.Err()
	case p.queue <- struct{}{}:
		if !timer.Stop() {
			<-timer.C
		}
		timers.Put(timer)
		return nil
	case <-timer.C:
		timers.Put(timer)
		atomic.AddUint32(&p.stats.Timeouts, 1)
		return ErrPoolTimeout
	}
}

func (p *ConnPool) freeTurn() {
	<-p.queue
}

func (p *ConnPool) popIdle() (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}
	n := len(p.idleConns)
	if n == 0 {
		return nil, nil
	}

	var cn *Conn
	if p.opt.PoolFIFO {
		cn = p.idleConns[0]
		copy(p.idleConns, p.idleConns[1:])
		p.idleConns = p.idleConns[:n-1]
	} else {
		idx := n - 1
		cn = p.idleConns[idx]
		p.idleConns = p.idleConns[:idx]
	}
	p.idleConnsLen--
	p.checkMinIdleConns()
	return cn, nil
}

func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
	if cn.rd.Buffered() > 0 {
		internal.Logger.Printf(ctx, "Conn has unread data")
		p.Remove(ctx, cn, BadConnError{})
		return
	}

	if !cn.pooled {
		p.Remove(ctx, cn, nil)
		return
	}

	p.connsMu.Lock()
	p.idleConns = append(p.idleConns, cn)
	p.idleConnsLen++
	p.connsMu.Unlock()
	p.freeTurn()
}

func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.removeConnWithLock(cn)
	p.freeTurn()
	_ = p.closeConn(cn)
}

func (p *ConnPool) CloseConn(cn *Conn) error {
	p.removeConnWithLock(cn)
	return p.closeConn(cn)
}

func (p *ConnPool) removeConnWithLock(cn *Conn) {
	p.connsMu.Lock()
	p.removeConn(cn)
	p.connsMu.Unlock()
}

func (p *ConnPool) removeConn(cn *Conn) {
	for i, c := range p.conns {
		if c == cn {
			p.conns = append(p.conns[:i], p.conns[i+1:]...)
			if cn.pooled {
				p.poolSize--
				p.checkMinIdleConns()
			}
			return
		}
	}
}

func (p *ConnPool) closeConn(cn *Conn) error {
	if p.opt.OnClose != nil {
		_ = p.opt.OnClose(cn)
	}
	return cn.Close()
}

// Len returns total number of connections.
func (p *ConnPool) Len() int {
	p.connsMu.Lock()
	n := len(p.conns)
	p.connsMu.Unlock()
	return n
}

// IdleLen returns number of idle connections.
func (p *ConnPool) IdleLen() int {
	p.connsMu.Lock()
	n := p.idleConnsLen
	p.connsMu.Unlock()
	return n
}

func (p *ConnPool) Stats() *Stats {
	idleLen := p.IdleLen()
	return &Stats{
		Hits:     atomic.LoadUint32(&p.stats.Hits),
		Misses:   atomic.LoadUint32(&p.stats.Misses),
		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),

		TotalConns: uint32(p.Len()),
		IdleConns:  uint32(idleLen),
		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
	}
}

func (p *ConnPool) closed() bool {
	return atomic.LoadUint32(&p._closed) == 1
}

func (p *ConnPool) Filter(fn func(*Conn) bool) error {
	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	var firstErr error
	for _, cn := range p.conns {
		if fn(cn) {
			if err := p.closeConn(cn); err != nil && firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}

func (p *ConnPool) Close() error {
	if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
		return ErrClosed
	}
	close(p.closedCh)

	var firstErr error
	p.connsMu.Lock()
	for _, cn := range p.conns {
		if err := p.closeConn(cn); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	p.conns = nil
	p.poolSize = 0
	p.idleConns = nil
	p.idleConnsLen = 0
	p.connsMu.Unlock()

	return firstErr
}

func (p *ConnPool) reaper(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// It is possible that ticker and closedCh arrive together,
			// and select pseudo-randomly picks the ticker case; we double
			// check here to prevent running after the pool is closed.
			if p.closed() {
				return
			}
			_, err := p.ReapStaleConns()
			if err != nil {
				internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
				continue
			}
		case <-p.closedCh:
			return
		}
	}
}

func (p *ConnPool) ReapStaleConns() (int, error) {
	var n int
	for {
		p.getTurn()

		p.connsMu.Lock()
		cn := p.reapStaleConn()
		p.connsMu.Unlock()

		p.freeTurn()

		if cn != nil {
			_ = p.closeConn(cn)
			n++
		} else {
			break
		}
	}
	atomic.AddUint32(&p.stats.StaleConns, uint32(n))
	return n, nil
}

func (p *ConnPool) reapStaleConn() *Conn {
	if len(p.idleConns) == 0 {
		return nil
	}

	cn := p.idleConns[0]
	if !p.isStaleConn(cn) {
		return nil
	}

	p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
	p.idleConnsLen--
	p.removeConn(cn)

	return cn
}

func (p *ConnPool) isStaleConn(cn *Conn) bool {
	if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
		return false
	}

	now := time.Now()
	if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
		return true
	}
	if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
		return true
	}

	return false
}
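Illustrative only, not part of the commit: wiring a ConnPool by hand. Application code normally goes through the top-level client, which builds one of these internally; the option values and the two helpers below are arbitrary.

func newExamplePool() *pool.ConnPool {
	return pool.NewConnPool(&pool.Options{
		Dialer: func(ctx context.Context) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "tcp", "localhost:6379")
		},
		PoolSize:           10,
		PoolTimeout:        4 * time.Second,
		IdleTimeout:        5 * time.Minute,
		IdleCheckFrequency: time.Minute,
	})
}

func withConn(ctx context.Context, p *pool.ConnPool, fn func(*pool.Conn) error) error {
	cn, err := p.Get(ctx) // waits for a turn; may return ErrPoolTimeout
	if err != nil {
		return err
	}
	if err := fn(cn); err != nil {
		p.Remove(ctx, cn, err) // broken conn: close it and free the turn
		return err
	}
	p.Put(ctx, cn) // healthy conn: return it to the idle list
	return nil
}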
@@ -0,0 +1,58 @@
package pool

import "context"

type SingleConnPool struct {
	pool      Pooler
	cn        *Conn
	stickyErr error
}

var _ Pooler = (*SingleConnPool)(nil)

func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
	return &SingleConnPool{
		pool: pool,
		cn:   cn,
	}
}

func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}

func (p *SingleConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}

func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
	if p.stickyErr != nil {
		return nil, p.stickyErr
	}
	return p.cn, nil
}

func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}

func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.cn = nil
	p.stickyErr = reason
}

func (p *SingleConnPool) Close() error {
	p.cn = nil
	p.stickyErr = ErrClosed
	return nil
}

func (p *SingleConnPool) Len() int {
	return 0
}

func (p *SingleConnPool) IdleLen() int {
	return 0
}

func (p *SingleConnPool) Stats() *Stats {
	return &Stats{}
}
@@ -0,0 +1,201 @@
package pool

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
)

const (
	stateDefault = 0
	stateInited  = 1
	stateClosed  = 2
)

type BadConnError struct {
	wrapped error
}

var _ error = (*BadConnError)(nil)

func (e BadConnError) Error() string {
	s := "redis: Conn is in a bad state"
	if e.wrapped != nil {
		s += ": " + e.wrapped.Error()
	}
	return s
}

func (e BadConnError) Unwrap() error {
	return e.wrapped
}

//------------------------------------------------------------------------------

type StickyConnPool struct {
	pool   Pooler
	shared int32 // atomic

	state uint32 // atomic
	ch    chan *Conn

	_badConnError atomic.Value
}

var _ Pooler = (*StickyConnPool)(nil)

func NewStickyConnPool(pool Pooler) *StickyConnPool {
	p, ok := pool.(*StickyConnPool)
	if !ok {
		p = &StickyConnPool{
			pool: pool,
			ch:   make(chan *Conn, 1),
		}
	}
	atomic.AddInt32(&p.shared, 1)
	return p
}

func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}

func (p *StickyConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}

func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
	// In worst case this races with Close which is not a very common operation.
	for i := 0; i < 1000; i++ {
		switch atomic.LoadUint32(&p.state) {
		case stateDefault:
			cn, err := p.pool.Get(ctx)
			if err != nil {
				return nil, err
			}
			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
				return cn, nil
			}
			p.pool.Remove(ctx, cn, ErrClosed)
		case stateInited:
			if err := p.badConnError(); err != nil {
				return nil, err
			}
			cn, ok := <-p.ch
			if !ok {
				return nil, ErrClosed
			}
			return cn, nil
		case stateClosed:
			return nil, ErrClosed
		default:
			panic("not reached")
		}
	}
	return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
}

func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
	defer func() {
		if recover() != nil {
			p.freeConn(ctx, cn)
		}
	}()
	p.ch <- cn
}

func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
	if err := p.badConnError(); err != nil {
		p.pool.Remove(ctx, cn, err)
	} else {
		p.pool.Put(ctx, cn)
	}
}

func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	defer func() {
		if recover() != nil {
			p.pool.Remove(ctx, cn, ErrClosed)
		}
	}()
	p._badConnError.Store(BadConnError{wrapped: reason})
	p.ch <- cn
}

func (p *StickyConnPool) Close() error {
	if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
		return nil
	}

	for i := 0; i < 1000; i++ {
		state := atomic.LoadUint32(&p.state)
		if state == stateClosed {
			return ErrClosed
		}
		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
			close(p.ch)
			cn, ok := <-p.ch
			if ok {
				p.freeConn(context.TODO(), cn)
			}
			return nil
		}
	}

	return errors.New("redis: StickyConnPool.Close: infinite loop")
}

func (p *StickyConnPool) Reset(ctx context.Context) error {
	if p.badConnError() == nil {
		return nil
	}

	select {
	case cn, ok := <-p.ch:
		if !ok {
			return ErrClosed
		}
		p.pool.Remove(ctx, cn, ErrClosed)
		p._badConnError.Store(BadConnError{wrapped: nil})
	default:
		return errors.New("redis: StickyConnPool does not have a Conn")
	}

	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
		state := atomic.LoadUint32(&p.state)
		return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
	}

	return nil
}

func (p *StickyConnPool) badConnError() error {
	if v := p._badConnError.Load(); v != nil {
		if err := v.(BadConnError); err.wrapped != nil {
			return err
		}
	}
	return nil
}

func (p *StickyConnPool) Len() int {
	switch atomic.LoadUint32(&p.state) {
	case stateDefault:
		return 0
	case stateInited:
		return 1
	case stateClosed:
		return 0
	default:
		panic("not reached")
	}
}

func (p *StickyConnPool) IdleLen() int {
	return len(p.ch)
}

func (p *StickyConnPool) Stats() *Stats {
	return &Stats{}
}
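Illustrative only, not part of the commit: the sticky pool pins one connection for a logical session (go-redis uses this shape of pool for transactions that must stay on one connection, e.g. WATCH/MULTI; treat that as an assumption about the rest of the client). A minimal lifecycle sketch:

func stickySession(ctx context.Context, base pool.Pooler) error {
	sticky := pool.NewStickyConnPool(base)
	defer sticky.Close() // decrements the shared count; frees the conn once it reaches zero

	cn, err := sticky.Get(ctx) // first Get checks a conn out of the base pool
	if err != nil {
		return err
	}
	// ... run commands that must share this connection ...
	sticky.Put(ctx, cn) // parks the conn in the one-slot channel

	cn, err = sticky.Get(ctx) // later Gets return the same parked conn
	if err != nil {
		return err
	}
	sticky.Put(ctx, cn)
	return nil
}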
@@ -0,0 +1,332 @@
package proto

import (
	"bufio"
	"fmt"
	"io"

	"github.com/go-redis/redis/v8/internal/util"
)

// redis resp protocol data type.
const (
	ErrorReply  = '-'
	StatusReply = '+'
	IntReply    = ':'
	StringReply = '$'
	ArrayReply  = '*'
)

//------------------------------------------------------------------------------

const Nil = RedisError("redis: nil") // nolint:errname

type RedisError string

func (e RedisError) Error() string { return string(e) }

func (RedisError) RedisError() {}

//------------------------------------------------------------------------------

type MultiBulkParse func(*Reader, int64) (interface{}, error)

type Reader struct {
	rd   *bufio.Reader
	_buf []byte
}

func NewReader(rd io.Reader) *Reader {
	return &Reader{
		rd:   bufio.NewReader(rd),
		_buf: make([]byte, 64),
	}
}

func (r *Reader) Buffered() int {
	return r.rd.Buffered()
}

func (r *Reader) Peek(n int) ([]byte, error) {
	return r.rd.Peek(n)
}

func (r *Reader) Reset(rd io.Reader) {
	r.rd.Reset(rd)
}

func (r *Reader) ReadLine() ([]byte, error) {
	line, err := r.readLine()
	if err != nil {
		return nil, err
	}
	if isNilReply(line) {
		return nil, Nil
	}
	return line, nil
}

// readLine reads a line, returning an error if:
//   - there is a pending read error;
//   - or the line does not end with \r\n.
func (r *Reader) readLine() ([]byte, error) {
	b, err := r.rd.ReadSlice('\n')
	if err != nil {
		if err != bufio.ErrBufferFull {
			return nil, err
		}

		full := make([]byte, len(b))
		copy(full, b)

		b, err = r.rd.ReadBytes('\n')
		if err != nil {
			return nil, err
		}

		full = append(full, b...) //nolint:makezero
		b = full
	}
	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
		return nil, fmt.Errorf("redis: invalid reply: %q", b)
	}
	return b[:len(b)-2], nil
}

func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}

	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case StatusReply:
		return string(line[1:]), nil
	case IntReply:
		return util.ParseInt(line[1:], 10, 64)
	case StringReply:
		return r.readStringReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return nil, err
		}
		if m == nil {
			err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
			return nil, err
		}
		return m(r, n)
	}
	return nil, fmt.Errorf("redis: can't parse %.100q", line)
}

func (r *Reader) ReadIntReply() (int64, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case IntReply:
		return util.ParseInt(line[1:], 10, 64)
	default:
		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
	}
}

func (r *Reader) ReadString() (string, error) {
	line, err := r.ReadLine()
	if err != nil {
		return "", err
	}
	switch line[0] {
	case ErrorReply:
		return "", ParseErrorReply(line)
	case StringReply:
		return r.readStringReply(line)
	case StatusReply:
		return string(line[1:]), nil
	case IntReply:
		return string(line[1:]), nil
	default:
		return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
	}
}

func (r *Reader) readStringReply(line []byte) (string, error) {
	if isNilReply(line) {
		return "", Nil
	}

	replyLen, err := util.Atoi(line[1:])
	if err != nil {
		return "", err
	}

	b := make([]byte, replyLen+2)
	_, err = io.ReadFull(r.rd, b)
	if err != nil {
		return "", err
	}

	return util.BytesToString(b[:replyLen]), nil
}

func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}
	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return nil, err
		}
		return m(r, n)
	default:
		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}
}

func (r *Reader) ReadArrayLen() (int, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return 0, err
		}
		return int(n), nil
	default:
		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}
}

func (r *Reader) ReadScanReply() ([]string, uint64, error) {
	n, err := r.ReadArrayLen()
	if err != nil {
		return nil, 0, err
	}
	if n != 2 {
		return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
	}

	cursor, err := r.ReadUint()
	if err != nil {
		return nil, 0, err
	}

	n, err = r.ReadArrayLen()
	if err != nil {
		return nil, 0, err
	}

	keys := make([]string, n)

	for i := 0; i < n; i++ {
		key, err := r.ReadString()
		if err != nil {
			return nil, 0, err
		}
		keys[i] = key
	}

	return keys, cursor, err
}

func (r *Reader) ReadInt() (int64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseInt(b, 10, 64)
}

func (r *Reader) ReadUint() (uint64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseUint(b, 10, 64)
}

func (r *Reader) ReadFloatReply() (float64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseFloat(b, 64)
}

func (r *Reader) readTmpBytesReply() ([]byte, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}
	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case StringReply:
		return r._readTmpBytesReply(line)
	case StatusReply:
		return line[1:], nil
	default:
		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
	}
}

func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
	if isNilReply(line) {
		return nil, Nil
	}

	replyLen, err := util.Atoi(line[1:])
	if err != nil {
		return nil, err
	}

	buf := r.buf(replyLen + 2)
	_, err = io.ReadFull(r.rd, buf)
	if err != nil {
		return nil, err
	}

	return buf[:replyLen], nil
}

func (r *Reader) buf(n int) []byte {
	if n <= cap(r._buf) {
		return r._buf[:n]
	}
	d := n - cap(r._buf)
	r._buf = append(r._buf, make([]byte, d)...)
	return r._buf
}

func isNilReply(b []byte) bool {
	return len(b) == 3 &&
		(b[0] == StringReply || b[0] == ArrayReply) &&
		b[1] == '-' && b[2] == '1'
}

func ParseErrorReply(line []byte) error {
	return RedisError(string(line[1:]))
}

func parseArrayLen(line []byte) (int64, error) {
	if isNilReply(line) {
		return 0, Nil
	}
	return util.ParseInt(line[1:], 10, 64)
}
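Illustrative only, not part of the commit (and written as if inside the module, since internal packages are not importable from outside it): feeding Reader a canned RESP stream shows how the reply markers map onto the Read* methods.

func readCannedReplies() {
	// A status reply, an integer reply, and a bulk string reply, in RESP wire format.
	rd := proto.NewReader(strings.NewReader("+OK\r\n:42\r\n$5\r\nhello\r\n"))

	status, _ := rd.ReadString() // "+OK"   -> "OK"
	n, _ := rd.ReadIntReply()    // ":42"   -> 42
	s, _ := rd.ReadString()      // "$5..." -> "hello"

	fmt.Println(status, n, s) // OK 42 hello
}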
@@ -0,0 +1,172 @@
package proto

import (
	"encoding"
	"fmt"
	"reflect"
	"time"

	"github.com/go-redis/redis/v8/internal/util"
)

// Scan parses bytes `b` to `v` with appropriate type.
func Scan(b []byte, v interface{}) error {
	switch v := v.(type) {
	case nil:
		return fmt.Errorf("redis: Scan(nil)")
	case *string:
		*v = util.BytesToString(b)
		return nil
	case *[]byte:
		*v = b
		return nil
	case *int:
		var err error
		*v, err = util.Atoi(b)
		return err
	case *int8:
		n, err := util.ParseInt(b, 10, 8)
		if err != nil {
			return err
		}
		*v = int8(n)
		return nil
	case *int16:
		n, err := util.ParseInt(b, 10, 16)
		if err != nil {
			return err
		}
		*v = int16(n)
		return nil
	case *int32:
		n, err := util.ParseInt(b, 10, 32)
		if err != nil {
			return err
		}
		*v = int32(n)
		return nil
	case *int64:
		n, err := util.ParseInt(b, 10, 64)
		if err != nil {
			return err
		}
		*v = n
		return nil
	case *uint:
		n, err := util.ParseUint(b, 10, 64)
		if err != nil {
			return err
		}
		*v = uint(n)
		return nil
	case *uint8:
		n, err := util.ParseUint(b, 10, 8)
		if err != nil {
			return err
		}
		*v = uint8(n)
		return nil
	case *uint16:
		n, err := util.ParseUint(b, 10, 16)
		if err != nil {
			return err
		}
		*v = uint16(n)
		return nil
	case *uint32:
		n, err := util.ParseUint(b, 10, 32)
		if err != nil {
			return err
		}
		*v = uint32(n)
		return nil
	case *uint64:
		n, err := util.ParseUint(b, 10, 64)
		if err != nil {
			return err
		}
		*v = n
		return nil
	case *float32:
		n, err := util.ParseFloat(b, 32)
		if err != nil {
			return err
		}
		*v = float32(n)
		return err
	case *float64:
		var err error
		*v, err = util.ParseFloat(b, 64)
		return err
	case *bool:
		*v = len(b) == 1 && b[0] == '1'
		return nil
	case *time.Time:
		var err error
		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
		return err
	case encoding.BinaryUnmarshaler:
		return v.UnmarshalBinary(b)
	default:
		return fmt.Errorf(
			"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
	}
}

func ScanSlice(data []string, slice interface{}) error {
	v := reflect.ValueOf(slice)
	if !v.IsValid() {
		return fmt.Errorf("redis: ScanSlice(nil)")
	}
	if v.Kind() != reflect.Ptr {
		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
	}
	v = v.Elem()
	if v.Kind() != reflect.Slice {
		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
	}

	next := makeSliceNextElemFunc(v)
	for i, s := range data {
		elem := next()
		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
			return err
		}
	}

	return nil
}

func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
	elemType := v.Type().Elem()

	if elemType.Kind() == reflect.Ptr {
		elemType = elemType.Elem()
		return func() reflect.Value {
			if v.Len() < v.Cap() {
				v.Set(v.Slice(0, v.Len()+1))
				elem := v.Index(v.Len() - 1)
				if elem.IsNil() {
					elem.Set(reflect.New(elemType))
				}
				return elem.Elem()
			}

			elem := reflect.New(elemType)
			v.Set(reflect.Append(v, elem))
			return elem.Elem()
		}
	}

	zero := reflect.Zero(elemType)
	return func() reflect.Value {
		if v.Len() < v.Cap() {
			v.Set(v.Slice(0, v.Len()+1))
			return v.Index(v.Len() - 1)
		}

		v.Set(reflect.Append(v, zero))
		return v.Index(v.Len() - 1)
	}
}
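Illustrative only, not part of the commit: Scan converts raw reply bytes into typed Go values, and ScanSlice maps a slice of replies onto a typed slice. The scanExamples helper is made up.

func scanExamples() error {
	var n int64
	if err := proto.Scan([]byte("42"), &n); err != nil { // n == 42
		return err
	}

	var ok bool
	if err := proto.Scan([]byte("1"), &ok); err != nil { // ok == true
		return err
	}

	var nums []int
	// -> []int{10, 20, 30}
	return proto.ScanSlice([]string{"10", "20", "30"}, &nums)
}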
@@ -0,0 +1,153 @@
package proto

import (
	"encoding"
	"fmt"
	"io"
	"strconv"
	"time"

	"github.com/go-redis/redis/v8/internal/util"
)

type writer interface {
	io.Writer
	io.ByteWriter
	// io.StringWriter
	WriteString(s string) (n int, err error)
}

type Writer struct {
	writer

	lenBuf []byte
	numBuf []byte
}

func NewWriter(wr writer) *Writer {
	return &Writer{
		writer: wr,

		lenBuf: make([]byte, 64),
		numBuf: make([]byte, 64),
	}
}

func (w *Writer) WriteArgs(args []interface{}) error {
	if err := w.WriteByte(ArrayReply); err != nil {
		return err
	}

	if err := w.writeLen(len(args)); err != nil {
		return err
	}

	for _, arg := range args {
		if err := w.WriteArg(arg); err != nil {
			return err
		}
	}

	return nil
}

func (w *Writer) writeLen(n int) error {
	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
	w.lenBuf = append(w.lenBuf, '\r', '\n')
	_, err := w.Write(w.lenBuf)
	return err
}

func (w *Writer) WriteArg(v interface{}) error {
	switch v := v.(type) {
	case nil:
		return w.string("")
	case string:
		return w.string(v)
	case []byte:
		return w.bytes(v)
	case int:
		return w.int(int64(v))
	case int8:
		return w.int(int64(v))
	case int16:
		return w.int(int64(v))
	case int32:
		return w.int(int64(v))
	case int64:
		return w.int(v)
	case uint:
		return w.uint(uint64(v))
	case uint8:
		return w.uint(uint64(v))
	case uint16:
		return w.uint(uint64(v))
	case uint32:
		return w.uint(uint64(v))
	case uint64:
		return w.uint(v)
	case float32:
		return w.float(float64(v))
	case float64:
		return w.float(v)
	case bool:
		if v {
			return w.int(1)
		}
		return w.int(0)
	case time.Time:
		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
		return w.bytes(w.numBuf)
	case encoding.BinaryMarshaler:
		b, err := v.MarshalBinary()
		if err != nil {
			return err
		}
		return w.bytes(b)
	default:
		return fmt.Errorf(
			"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
	}
}

func (w *Writer) bytes(b []byte) error {
	if err := w.WriteByte(StringReply); err != nil {
		return err
	}

	if err := w.writeLen(len(b)); err != nil {
		return err
	}

	if _, err := w.Write(b); err != nil {
		return err
	}

	return w.crlf()
}

func (w *Writer) string(s string) error {
	return w.bytes(util.StringToBytes(s))
}

func (w *Writer) uint(n uint64) error {
	w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
	return w.bytes(w.numBuf)
}

func (w *Writer) int(n int64) error {
	w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
	return w.bytes(w.numBuf)
}

func (w *Writer) float(f float64) error {
	w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
	return w.bytes(w.numBuf)
}

func (w *Writer) crlf() error {
	if err := w.WriteByte('\r'); err != nil {
		return err
	}
	return w.WriteByte('\n')
}
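Illustrative only, not part of the commit: every command is encoded as a RESP array of bulk strings. Encoding SET k v with Writer produces "*3\r\n$3\r\nset\r\n$1\r\nk\r\n$1\r\nv\r\n"; a bytes.Buffer satisfies the unexported writer interface (Write, WriteByte, WriteString).

func encodeSet() (string, error) {
	var buf bytes.Buffer
	wr := proto.NewWriter(&buf)
	if err := wr.WriteArgs([]interface{}{"set", "k", "v"}); err != nil {
		return "", err
	}
	return buf.String(), nil
}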
@@ -0,0 +1,50 @@
package rand

import (
	"math/rand"
	"sync"
)

// Int returns a non-negative pseudo-random int.
func Int() int { return pseudo.Int() }

// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Intn(n int) int { return pseudo.Intn(n) }

// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Int63n(n int64) int64 { return pseudo.Int63n(n) }

// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
func Perm(n int) []int { return pseudo.Perm(n) }

// Seed uses the provided seed value to initialize the default Source to a
// deterministic state. If Seed is not called, the generator behaves as if
// seeded by Seed(1).
func Seed(n int64) { pseudo.Seed(n) }

var pseudo = rand.New(&source{src: rand.NewSource(1)})

type source struct {
	src rand.Source
	mu  sync.Mutex
}

func (s *source) Int63() int64 {
	s.mu.Lock()
	n := s.src.Int63()
	s.mu.Unlock()
	return n
}

func (s *source) Seed(seed int64) {
	s.mu.Lock()
	s.src.Seed(seed)
	s.mu.Unlock()
}

// Shuffle pseudo-randomizes the order of elements.
// n is the number of elements.
// swap swaps the elements with indexes i and j.
func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
@@ -0,0 +1,12 @@
//go:build appengine
// +build appengine

package internal

func String(b []byte) string {
	return string(b)
}

func Bytes(s string) []byte {
	return []byte(s)
}
@@ -0,0 +1,21 @@
//go:build !appengine
// +build !appengine

package internal

import "unsafe"

// String converts byte slice to string.
func String(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// Bytes converts string to byte slice.
func Bytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
@@ -0,0 +1,46 @@
package internal

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8/internal/util"
)

func Sleep(ctx context.Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func ToLower(s string) string {
	if isLower(s) {
		return s
	}

	b := make([]byte, len(s))
	for i := range b {
		c := s[i]
		if c >= 'A' && c <= 'Z' {
			c += 'a' - 'A'
		}
		b[i] = c
	}
	return util.BytesToString(b)
}

func isLower(s string) bool {
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= 'A' && c <= 'Z' {
			return false
		}
	}
	return true
}
@@ -0,0 +1,12 @@
//go:build appengine
// +build appengine

package util

func BytesToString(b []byte) string {
	return string(b)
}

func StringToBytes(s string) []byte {
	return []byte(s)
}
@@ -0,0 +1,19 @@
package util

import "strconv"

func Atoi(b []byte) (int, error) {
	return strconv.Atoi(BytesToString(b))
}

func ParseInt(b []byte, base int, bitSize int) (int64, error) {
	return strconv.ParseInt(BytesToString(b), base, bitSize)
}

func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
	return strconv.ParseUint(BytesToString(b), base, bitSize)
}

func ParseFloat(b []byte, bitSize int) (float64, error) {
	return strconv.ParseFloat(BytesToString(b), bitSize)
}
@@ -0,0 +1,23 @@
//go:build !appengine
// +build !appengine

package util

import (
	"unsafe"
)

// BytesToString converts byte slice to string.
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// StringToBytes converts string to byte slice.
func StringToBytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
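Illustrative only, not part of the commit: these conversions alias memory rather than copying, so the result must be treated as immutable; mutating the source afterwards is visible through the converted value.

func aliasingDemo() {
	b := []byte("hello")
	s := util.BytesToString(b)
	b[0] = 'H'
	fmt.Println(s) // prints "Hello": the string observed the mutation
}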
@@ -0,0 +1,77 @@
package redis

import (
	"context"
	"sync"
)

// ScanIterator is used to incrementally iterate over a collection of elements.
// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
	mu  sync.Mutex // protects Scanner and pos
	cmd *ScanCmd
	pos int
}

// Err returns the last iterator error, if any.
func (it *ScanIterator) Err() error {
	it.mu.Lock()
	err := it.cmd.Err()
	it.mu.Unlock()
	return err
}

// Next advances the cursor and returns true if more values can be read.
func (it *ScanIterator) Next(ctx context.Context) bool {
	it.mu.Lock()
	defer it.mu.Unlock()

	// Instantly return on errors.
	if it.cmd.Err() != nil {
		return false
	}

	// Advance cursor, check if we are still within range.
	if it.pos < len(it.cmd.page) {
		it.pos++
		return true
	}

	for {
		// Return if there is no more data to fetch.
		if it.cmd.cursor == 0 {
			return false
		}

		// Fetch next page.
		switch it.cmd.args[0] {
		case "scan", "qscan":
			it.cmd.args[1] = it.cmd.cursor
		default:
			it.cmd.args[2] = it.cmd.cursor
		}

		err := it.cmd.process(ctx, it.cmd)
		if err != nil {
			return false
		}

		it.pos = 1

		// Redis can occasionally return an empty page.
		if len(it.cmd.page) > 0 {
			return true
		}
	}
}

// Val returns the key/field at the current cursor position.
func (it *ScanIterator) Val() string {
	var v string
	it.mu.Lock()
	if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
		v = it.cmd.page[it.pos-1]
	}
	it.mu.Unlock()
	return v
}
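Illustrative only, not part of the commit: typical use via the public API. That ScanCmd exposes an Iterator() constructor is an assumption about the rest of the vendored client; the "ocsp:*" match pattern and count of 100 are arbitrary.

func listKeys(ctx context.Context, rdb *redis.Client) ([]string, error) {
	var keys []string
	iter := rdb.Scan(ctx, 0, "ocsp:*", 100).Iterator()
	for iter.Next(ctx) { // Next fetches pages transparently as the cursor advances
		keys = append(keys, iter.Val())
	}
	return keys, iter.Err()
}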
@@ -0,0 +1,429 @@
package redis

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/url"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/go-redis/redis/v8/internal/pool"
)

// Limiter is the interface of a rate limiter or a circuit breaker.
type Limiter interface {
	// Allow returns nil if operation is allowed or an error otherwise.
	// If operation is allowed client must ReportResult of the operation
	// whether it is a success or a failure.
	Allow() error
	// ReportResult reports the result of the previously allowed operation.
	// nil indicates a success, non-nil error usually indicates a failure.
	ReportResult(result error)
}

// Options keeps the settings to setup redis connection.
type Options struct {
	// The network type, either tcp or unix.
	// Default is tcp.
	Network string
	// host:port address.
	Addr string

	// Dialer creates new network connection and has priority over
	// Network and Addr options.
	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)

	// Hook that is called when new connection is established.
	OnConnect func(ctx context.Context, cn *Conn) error

	// Use the specified Username to authenticate the current connection
	// with one of the connections defined in the ACL list when connecting
	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
	Username string
	// Optional password. Must match the password specified in the
	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
	// or the User Password when connecting to a Redis 6.0 instance, or greater,
	// that is using the Redis ACL system.
	Password string

	// Database to be selected after connecting to the server.
	DB int

	// Maximum number of retries before giving up.
	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff time.Duration
	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff time.Duration

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration
	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout time.Duration
	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking.
	// Default is ReadTimeout.
	WriteTimeout time.Duration

	// Type of connection pool.
	// true for FIFO pool, false for LIFO pool.
	// Note that fifo has higher overhead compared to lifo.
	PoolFIFO bool
	// Maximum number of socket connections.
	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
	PoolSize int
	// Minimum number of idle connections which is useful when establishing
	// new connection is slow.
	MinIdleConns int
	// Connection age at which client retires (closes) the connection.
	// Default is to not close aged connections.
	MaxConnAge time.Duration
	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout time.Duration
	// Amount of time after which client closes idle connections.
	// Should be less than server's timeout.
	// Default is 5 minutes. -1 disables idle timeout check.
	IdleTimeout time.Duration
	// Frequency of idle checks made by idle connections reaper.
	// Default is 1 minute. -1 disables idle connections reaper,
	// but idle connections are still discarded by the client
	// if IdleTimeout is set.
	IdleCheckFrequency time.Duration

	// Enables read only queries on slave nodes.
	readOnly bool

	// TLS Config to use. When set TLS will be negotiated.
	TLSConfig *tls.Config

	// Limiter interface used to implement circuit breaker or rate limiter.
	Limiter Limiter
}

func (opt *Options) init() {
	if opt.Addr == "" {
		opt.Addr = "localhost:6379"
	}
	if opt.Network == "" {
		if strings.HasPrefix(opt.Addr, "/") {
			opt.Network = "unix"
		} else {
			opt.Network = "tcp"
		}
	}
	if opt.DialTimeout == 0 {
		opt.DialTimeout = 5 * time.Second
	}
	if opt.Dialer == nil {
		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
			netDialer := &net.Dialer{
				Timeout:   opt.DialTimeout,
				KeepAlive: 5 * time.Minute,
			}
			if opt.TLSConfig == nil {
				return netDialer.DialContext(ctx, network, addr)
			}
			return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
		}
	}
	if opt.PoolSize == 0 {
		opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
	}
	switch opt.ReadTimeout {
	case -1:
		opt.ReadTimeout = 0
	case 0:
		opt.ReadTimeout = 3 * time.Second
	}
	switch opt.WriteTimeout {
	case -1:
		opt.WriteTimeout = 0
	case 0:
		opt.WriteTimeout = opt.ReadTimeout
	}
	if opt.PoolTimeout == 0 {
		opt.PoolTimeout = opt.ReadTimeout + time.Second
	}
	if opt.IdleTimeout == 0 {
		opt.IdleTimeout = 5 * time.Minute
	}
	if opt.IdleCheckFrequency == 0 {
		opt.IdleCheckFrequency = time.Minute
	}

	if opt.MaxRetries == -1 {
		opt.MaxRetries = 0
	} else if opt.MaxRetries == 0 {
		opt.MaxRetries = 3
	}
	switch opt.MinRetryBackoff {
	case -1:
		opt.MinRetryBackoff = 0
	case 0:
		opt.MinRetryBackoff = 8 * time.Millisecond
	}
	switch opt.MaxRetryBackoff {
	case -1:
		opt.MaxRetryBackoff = 0
	case 0:
		opt.MaxRetryBackoff = 512 * time.Millisecond
	}
}

func (opt *Options) clone() *Options {
	clone := *opt
	return &clone
}

// ParseURL parses a URL into Options that can be used to connect to Redis.
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
//		redis://<user>:<password>@<host>:<port>/<db_number>
// Unix connection:
//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
// Most Option fields can be set using query parameters, with the following restrictions:
//	- field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
//	- only scalar type fields are supported (bool, int, time.Duration)
//	- for time.Duration fields, values must be a valid input for time.ParseDuration();
//	  additionally a plain integer as value (i.e. without unit) is interpreted as seconds
//	- to disable a duration field, use value less than or equal to 0; to use the default
//	  value, leave the value blank or remove the parameter
//	- only the last value is interpreted if a parameter is given multiple times
//	- fields "network", "addr", "username" and "password" can only be set using other
//	  URL attributes (scheme, host, userinfo, resp.), query parameters using these
//	  names will be treated as unknown parameters
//	- unknown parameter names will result in an error
// Examples:
//		redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
//		is equivalent to:
//		&Options{
//			Network:     "tcp",
//			Addr:        "localhost:6789",
//			DB:          1,               // path "/3" was overridden by "&db=1"
//			DialTimeout: 3 * time.Second, // no time unit = seconds
//			ReadTimeout: 6 * time.Second,
//			MaxRetries:  2,
//		}
func ParseURL(redisURL string) (*Options, error) {
	u, err := url.Parse(redisURL)
	if err != nil {
		return nil, err
	}

	switch u.Scheme {
	case "redis", "rediss":
		return setupTCPConn(u)
	case "unix":
		return setupUnixConn(u)
	default:
		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
	}
}
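Illustrative only, not part of the commit: ParseURL pairs naturally with NewClient. The URL, hostname, and password below are made up; rediss selects TLS per the scheme handling above.

func clientFromURL() (*redis.Client, error) {
	opt, err := redis.ParseURL("rediss://:secret@redis.example.com:6379/0?pool_size=5&read_timeout=2s")
	if err != nil {
		return nil, err
	}
	return redis.NewClient(opt), nil
}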

func setupTCPConn(u *url.URL) (*Options, error) {
	o := &Options{Network: "tcp"}

	o.Username, o.Password = getUserPassword(u)

	h, p, err := net.SplitHostPort(u.Host)
	if err != nil {
		h = u.Host
	}
	if h == "" {
		h = "localhost"
	}
	if p == "" {
		p = "6379"
	}
	o.Addr = net.JoinHostPort(h, p)

	f := strings.FieldsFunc(u.Path, func(r rune) bool {
		return r == '/'
	})
	switch len(f) {
	case 0:
		o.DB = 0
	case 1:
		if o.DB, err = strconv.Atoi(f[0]); err != nil {
			return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
		}
	default:
		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
	}

	if u.Scheme == "rediss" {
		o.TLSConfig = &tls.Config{ServerName: h}
	}

	return setupConnParams(u, o)
}

func setupUnixConn(u *url.URL) (*Options, error) {
	o := &Options{
		Network: "unix",
	}

	if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
		return nil, errors.New("redis: empty unix socket path")
	}
	o.Addr = u.Path
	o.Username, o.Password = getUserPassword(u)
	return setupConnParams(u, o)
}

type queryOptions struct {
	q   url.Values
	err error
}

func (o *queryOptions) string(name string) string {
	vs := o.q[name]
	if len(vs) == 0 {
		return ""
	}
	delete(o.q, name) // enable detection of unknown parameters
	return vs[len(vs)-1]
}

func (o *queryOptions) int(name string) int {
	s := o.string(name)
	if s == "" {
		return 0
	}
	i, err := strconv.Atoi(s)
	if err == nil {
		return i
	}
	if o.err == nil {
		o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
	}
	return 0
}

func (o *queryOptions) duration(name string) time.Duration {
	s := o.string(name)
	if s == "" {
		return 0
	}
	// try plain number first
	if i, err := strconv.Atoi(s); err == nil {
		if i <= 0 {
			// disable timeouts
			return -1
		}
		return time.Duration(i) * time.Second
	}
	dur, err := time.ParseDuration(s)
	if err == nil {
		return dur
	}
	if o.err == nil {
		o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
	}
	return 0
}

func (o *queryOptions) bool(name string) bool {
	switch s := o.string(name); s {
	case "true", "1":
		return true
	case "false", "0", "":
		return false
	default:
		if o.err == nil {
			o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
		}
		return false
	}
}

func (o *queryOptions) remaining() []string {
	if len(o.q) == 0 {
		return nil
	}
	keys := make([]string, 0, len(o.q))
	for k := range o.q {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// setupConnParams converts query parameters in u to option value in o.
func setupConnParams(u *url.URL, o *Options) (*Options, error) {
	q := queryOptions{q: u.Query()}

	// compat: a future major release may use q.int("db")
	if tmp := q.string("db"); tmp != "" {
		db, err := strconv.Atoi(tmp)
		if err != nil {
			return nil, fmt.Errorf("redis: invalid database number: %w", err)
		}
		o.DB = db
	}

	o.MaxRetries = q.int("max_retries")
	o.MinRetryBackoff = q.duration("min_retry_backoff")
	o.MaxRetryBackoff = q.duration("max_retry_backoff")
	o.DialTimeout = q.duration("dial_timeout")
	o.ReadTimeout = q.duration("read_timeout")
	o.WriteTimeout = q.duration("write_timeout")
	o.PoolFIFO = q.bool("pool_fifo")
	o.PoolSize = q.int("pool_size")
	o.MinIdleConns = q.int("min_idle_conns")
	o.MaxConnAge = q.duration("max_conn_age")
	o.PoolTimeout = q.duration("pool_timeout")
	o.IdleTimeout = q.duration("idle_timeout")
|
||||
o.IdleCheckFrequency = q.duration("idle_check_frequency")
|
||||
if q.err != nil {
|
||||
return nil, q.err
|
||||
}
|
||||
|
||||
// any parameters left?
|
||||
if r := q.remaining(); len(r) > 0 {
|
||||
return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func getUserPassword(u *url.URL) (string, string) {
|
||||
var user, password string
|
||||
if u.User != nil {
|
||||
user = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
password = p
|
||||
}
|
||||
}
|
||||
return user, password
|
||||
}
|
||||
|
||||
func newConnPool(opt *Options) *pool.ConnPool {
|
||||
return pool.NewConnPool(&pool.Options{
|
||||
Dialer: func(ctx context.Context) (net.Conn, error) {
|
||||
return opt.Dialer(ctx, opt.Network, opt.Addr)
|
||||
},
|
||||
PoolFIFO: opt.PoolFIFO,
|
||||
PoolSize: opt.PoolSize,
|
||||
MinIdleConns: opt.MinIdleConns,
|
||||
MaxConnAge: opt.MaxConnAge,
|
||||
PoolTimeout: opt.PoolTimeout,
|
||||
IdleTimeout: opt.IdleTimeout,
|
||||
IdleCheckFrequency: opt.IdleCheckFrequency,
|
||||
})
|
||||
}
|
||||
|
|

@@ -0,0 +1,8 @@
{
  "name": "redis",
  "version": "8.11.4",
  "main": "index.js",
  "repository": "git@github.com:go-redis/redis.git",
  "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
  "license": "BSD-2-clause"
}

@@ -0,0 +1,137 @@
package redis

import (
	"context"
	"sync"

	"github.com/go-redis/redis/v8/internal/pool"
)

type pipelineExecer func(context.Context, []Cmder) error

// Pipeliner is a mechanism for realising the Redis Pipeline technique.
//
// Pipelining is a technique to greatly speed up processing by packing
// operations into batches, sending them at once to Redis, and reading all
// the replies in a single step.
// See https://redis.io/topics/pipelining
//
// Note that Pipeline is not a transaction, so you can get unexpected
// results in case of big pipelines and small read/write timeouts.
// The Redis client has retransmission logic in case of timeouts, so a
// pipeline can be retransmitted and commands can be executed more than once.
// To avoid this, use reasonably large read/write timeouts that account for
// your batch size, and/or use TxPipeline.
type Pipeliner interface {
	StatefulCmdable
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	Close() error
	Discard() error
	Exec(ctx context.Context) ([]Cmder, error)
}
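
// A minimal sketch of the queue-then-Exec flow (illustrative only; assumes a
// *Client named rdb and a ctx from the caller):
//
//	pipe := rdb.Pipeline()
//	incr := pipe.Incr(ctx, "counter")
//	pipe.Expire(ctx, "counter", time.Hour)
//	_, err := pipe.Exec(ctx)
//	// incr.Val() is only populated after Exec has returned.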

var _ Pipeliner = (*Pipeline)(nil)

// Pipeline implements pipelining as described in
// http://redis.io/topics/pipelining. It's safe for concurrent use
// by multiple goroutines.
type Pipeline struct {
	cmdable
	statefulCmdable

	ctx  context.Context
	exec pipelineExecer

	mu     sync.Mutex
	cmds   []Cmder
	closed bool
}

func (c *Pipeline) init() {
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
}

func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Process queues the cmd for later execution.
func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
	c.mu.Lock()
	c.cmds = append(c.cmds, cmd)
	c.mu.Unlock()
	return nil
}

// Close closes the pipeline, releasing any open resources.
func (c *Pipeline) Close() error {
	c.mu.Lock()
	_ = c.discard()
	c.closed = true
	c.mu.Unlock()
	return nil
}

// Discard resets the pipeline and discards queued commands.
func (c *Pipeline) Discard() error {
	c.mu.Lock()
	err := c.discard()
	c.mu.Unlock()
	return err
}

func (c *Pipeline) discard() error {
	if c.closed {
		return pool.ErrClosed
	}
	c.cmds = c.cmds[:0]
	return nil
}

// Exec executes all previously queued commands using one
// client-server roundtrip.
//
// Exec always returns the list of commands and the error of the first
// failed command, if any.
func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return nil, pool.ErrClosed
	}

	if len(c.cmds) == 0 {
		return nil, nil
	}

	cmds := c.cmds
	c.cmds = nil

	return cmds, c.exec(ctx, cmds)
}

func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	if err := fn(c); err != nil {
		return nil, err
	}
	cmds, err := c.Exec(ctx)
	_ = c.Close()
	return cmds, err
}

func (c *Pipeline) Pipeline() Pipeliner {
	return c
}

func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipelined(ctx, fn)
}

func (c *Pipeline) TxPipeline() Pipeliner {
	return c
}

@@ -0,0 +1,668 @@
package redis

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/go-redis/redis/v8/internal"
	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/proto"
)

// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
//
// PubSub automatically reconnects to the Redis Server and resubscribes
// to the channels in case of network errors.
type PubSub struct {
	opt *Options

	newConn   func(ctx context.Context, channels []string) (*pool.Conn, error)
	closeConn func(*pool.Conn) error

	mu       sync.Mutex
	cn       *pool.Conn
	channels map[string]struct{}
	patterns map[string]struct{}

	closed bool
	exit   chan struct{}

	cmd *Cmd

	chOnce sync.Once
	msgCh  *channel
	allCh  *channel
}
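
// A minimal sketch of obtaining and using a PubSub (illustrative only;
// assumes a *Client named rdb, a ctx, and a made-up channel name):
//
//	sub := rdb.Subscribe(ctx, "mychannel")
//	defer sub.Close()
//	msg, err := sub.ReceiveMessage(ctx)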

func (c *PubSub) init() {
	c.exit = make(chan struct{})
}

func (c *PubSub) String() string {
	channels := mapKeys(c.channels)
	channels = append(channels, mapKeys(c.patterns)...)
	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}

func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
	c.mu.Lock()
	cn, err := c.conn(ctx, nil)
	c.mu.Unlock()
	return cn, err
}

func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
	if c.closed {
		return nil, pool.ErrClosed
	}
	if c.cn != nil {
		return c.cn, nil
	}

	channels := mapKeys(c.channels)
	channels = append(channels, newChannels...)

	cn, err := c.newConn(ctx, channels)
	if err != nil {
		return nil, err
	}

	if err := c.resubscribe(ctx, cn); err != nil {
		_ = c.closeConn(cn)
		return nil, err
	}

	c.cn = cn
	return cn, nil
}

func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
	return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmd(wr, cmd)
	})
}

func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
	var firstErr error

	if len(c.channels) > 0 {
		firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
	}

	if len(c.patterns) > 0 {
		err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
		if err != nil && firstErr == nil {
			firstErr = err
		}
	}

	return firstErr
}

func mapKeys(m map[string]struct{}) []string {
	s := make([]string, len(m))
	i := 0
	for k := range m {
		s[i] = k
		i++
	}
	return s
}

func (c *PubSub) _subscribe(
	ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
) error {
	args := make([]interface{}, 0, 1+len(channels))
	args = append(args, redisCmd)
	for _, channel := range channels {
		args = append(args, channel)
	}
	cmd := NewSliceCmd(ctx, args...)
	return c.writeCmd(ctx, cn, cmd)
}

func (c *PubSub) releaseConnWithLock(
	ctx context.Context,
	cn *pool.Conn,
	err error,
	allowTimeout bool,
) {
	c.mu.Lock()
	c.releaseConn(ctx, cn, err, allowTimeout)
	c.mu.Unlock()
}

func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
	if c.cn != cn {
		return
	}
	if isBadConn(err, allowTimeout, c.opt.Addr) {
		c.reconnect(ctx, err)
	}
}

func (c *PubSub) reconnect(ctx context.Context, reason error) {
	_ = c.closeTheCn(reason)
	_, _ = c.conn(ctx, nil)
}

func (c *PubSub) closeTheCn(reason error) error {
	if c.cn == nil {
		return nil
	}
	if !c.closed {
		internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
	}
	err := c.closeConn(c.cn)
	c.cn = nil
	return err
}

func (c *PubSub) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return pool.ErrClosed
	}
	c.closed = true
	close(c.exit)

	return c.closeTheCn(pool.ErrClosed)
}

// Subscribe subscribes the client to the specified channels. It returns
// an empty subscription if there are no channels.
func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	err := c.subscribe(ctx, "subscribe", channels...)
	if c.channels == nil {
		c.channels = make(map[string]struct{})
	}
	for _, s := range channels {
		c.channels[s] = struct{}{}
	}
	return err
}

// PSubscribe subscribes the client to the given patterns. It returns
// an empty subscription if there are no patterns.
func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	err := c.subscribe(ctx, "psubscribe", patterns...)
	if c.patterns == nil {
		c.patterns = make(map[string]struct{})
	}
	for _, s := range patterns {
		c.patterns[s] = struct{}{}
	}
	return err
}

// Unsubscribe unsubscribes the client from the given channels, or from all
// of them if none is given.
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	for _, channel := range channels {
		delete(c.channels, channel)
	}
	err := c.subscribe(ctx, "unsubscribe", channels...)
	return err
}

// PUnsubscribe unsubscribes the client from the given patterns, or from all
// of them if none is given.
func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	for _, pattern := range patterns {
		delete(c.patterns, pattern)
	}
	err := c.subscribe(ctx, "punsubscribe", patterns...)
	return err
}

func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
	cn, err := c.conn(ctx, channels)
	if err != nil {
		return err
	}

	err = c._subscribe(ctx, cn, redisCmd, channels)
	c.releaseConn(ctx, cn, err, false)
	return err
}

func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
	args := []interface{}{"ping"}
	if len(payload) == 1 {
		args = append(args, payload[0])
	}
	cmd := NewCmd(ctx, args...)

	c.mu.Lock()
	defer c.mu.Unlock()

	cn, err := c.conn(ctx, nil)
	if err != nil {
		return err
	}

	err = c.writeCmd(ctx, cn, cmd)
	c.releaseConn(ctx, cn, err, false)
	return err
}

// Subscription is received after a successful subscription to a channel.
type Subscription struct {
	// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
	Kind string
	// Channel name we have subscribed to.
	Channel string
	// Number of channels we are currently subscribed to.
	Count int
}

func (m *Subscription) String() string {
	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
}

// Message is received as a result of a PUBLISH command issued by another client.
type Message struct {
	Channel      string
	Pattern      string
	Payload      string
	PayloadSlice []string
}

func (m *Message) String() string {
	return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
}

// Pong is received as a result of a PING command issued by another client.
type Pong struct {
	Payload string
}

func (p *Pong) String() string {
	if p.Payload != "" {
		return fmt.Sprintf("Pong<%s>", p.Payload)
	}
	return "Pong"
}

func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
	switch reply := reply.(type) {
	case string:
		return &Pong{
			Payload: reply,
		}, nil
	case []interface{}:
		switch kind := reply[0].(string); kind {
		case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
			// Can be nil in case of "unsubscribe".
			channel, _ := reply[1].(string)
			return &Subscription{
				Kind:    kind,
				Channel: channel,
				Count:   int(reply[2].(int64)),
			}, nil
		case "message":
			switch payload := reply[2].(type) {
			case string:
				return &Message{
					Channel: reply[1].(string),
					Payload: payload,
				}, nil
			case []interface{}:
				ss := make([]string, len(payload))
				for i, s := range payload {
					ss[i] = s.(string)
				}
				return &Message{
					Channel:      reply[1].(string),
					PayloadSlice: ss,
				}, nil
			default:
				return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
			}
		case "pmessage":
			return &Message{
				Pattern: reply[1].(string),
				Channel: reply[2].(string),
				Payload: reply[3].(string),
			}, nil
		case "pong":
			return &Pong{
				Payload: reply[1].(string),
			}, nil
		default:
			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
		}
	default:
		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
	}
}

// ReceiveTimeout acts like Receive but returns an error if a message
// is not received in time. This is a low-level API and in most cases
// Channel should be used instead.
func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
	if c.cmd == nil {
		c.cmd = NewCmd(ctx)
	}

	// Don't hold the lock to allow subscriptions and pings.

	cn, err := c.connWithLock(ctx)
	if err != nil {
		return nil, err
	}

	err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
		return c.cmd.readReply(rd)
	})

	c.releaseConnWithLock(ctx, cn, err, timeout > 0)

	if err != nil {
		return nil, err
	}

	return c.newMessage(c.cmd.Val())
}

// Receive returns a message as a Subscription, Message, Pong or error.
// See the PubSub example for details. This is a low-level API and in most
// cases Channel should be used instead.
func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
	return c.ReceiveTimeout(ctx, 0)
}

// ReceiveMessage returns a Message or an error, ignoring Subscription and
// Pong messages. This is a low-level API and in most cases Channel should
// be used instead.
func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
	for {
		msg, err := c.Receive(ctx)
		if err != nil {
			return nil, err
		}

		switch msg := msg.(type) {
		case *Subscription:
			// Ignore.
		case *Pong:
			// Ignore.
		case *Message:
			return msg, nil
		default:
			err := fmt.Errorf("redis: unknown message: %T", msg)
			return nil, err
		}
	}
}

func (c *PubSub) getContext() context.Context {
	if c.cmd != nil {
		return c.cmd.ctx
	}
	return context.Background()
}

//------------------------------------------------------------------------------

// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// is full for 30 seconds, the message is dropped.
// Receive* APIs cannot be used after the channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping is not received for 30 seconds.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
	c.chOnce.Do(func() {
		c.msgCh = newChannel(c, opts...)
		c.msgCh.initMsgChan()
	})
	if c.msgCh == nil {
		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
		panic(err)
	}
	return c.msgCh.msgCh
}
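
// A minimal consumption loop (illustrative only; assumes a *PubSub named
// sub): the returned channel is closed when the PubSub is closed, which
// ends the range.
//
//	for msg := range sub.Channel() {
//		fmt.Println(msg.Channel, msg.Payload)
//	}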

// ChannelSize is like Channel, but creates a Go channel
// with the specified buffer size.
//
// Deprecated: use Channel(WithChannelSize(size)); remove in v9.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
	return c.Channel(WithChannelSize(size))
}

// ChannelWithSubscriptions is like Channel, but the message type can be
// either *Subscription or *Message. Subscription messages can be used to
// detect reconnections.
//
// ChannelWithSubscriptions cannot be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
	c.chOnce.Do(func() {
		c.allCh = newChannel(c, WithChannelSize(size))
		c.allCh.initAllChan()
	})
	if c.allCh == nil {
		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
		panic(err)
	}
	return c.allCh.allCh
}

type ChannelOption func(c *channel)

// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
//
// The default is 100 messages.
func WithChannelSize(size int) ChannelOption {
	return func(c *channel) {
		c.chanSize = size
	}
}

// WithChannelHealthCheckInterval specifies the health check interval.
// PubSub will ping the Redis Server if it does not receive any messages
// within the interval. To disable the health check, use a zero interval.
//
// The default is 3 seconds.
func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
	return func(c *channel) {
		c.checkInterval = d
	}
}

// WithChannelSendTimeout specifies the channel send timeout after which
// the message is dropped.
//
// The default is 60 seconds.
func WithChannelSendTimeout(d time.Duration) ChannelOption {
	return func(c *channel) {
		c.chanSendTimeout = d
	}
}
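
// The options combine in a single Channel call (illustrative only; the
// values are arbitrary examples):
//
//	ch := sub.Channel(
//		WithChannelSize(1000),
//		WithChannelHealthCheckInterval(10*time.Second),
//		WithChannelSendTimeout(5*time.Second),
//	)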

type channel struct {
	pubSub *PubSub

	msgCh chan *Message
	allCh chan interface{}
	ping  chan struct{}

	chanSize        int
	chanSendTimeout time.Duration
	checkInterval   time.Duration
}

func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
	c := &channel{
		pubSub: pubSub,

		chanSize:        100,
		chanSendTimeout: time.Minute,
		checkInterval:   3 * time.Second,
	}
	for _, opt := range opts {
		opt(c)
	}
	if c.checkInterval > 0 {
		c.initHealthCheck()
	}
	return c
}

func (c *channel) initHealthCheck() {
	ctx := context.TODO()
	c.ping = make(chan struct{}, 1)

	go func() {
		timer := time.NewTimer(time.Minute)
		timer.Stop()

		for {
			timer.Reset(c.checkInterval)
			select {
			case <-c.ping:
				if !timer.Stop() {
					<-timer.C
				}
			case <-timer.C:
				if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
					c.pubSub.mu.Lock()
					c.pubSub.reconnect(ctx, pingErr)
					c.pubSub.mu.Unlock()
				}
			case <-c.pubSub.exit:
				return
			}
		}
	}()
}

// initMsgChan must be in sync with initAllChan.
func (c *channel) initMsgChan() {
	ctx := context.TODO()
	c.msgCh = make(chan *Message, c.chanSize)

	go func() {
		timer := time.NewTimer(time.Minute)
		timer.Stop()

		var errCount int
		for {
			msg, err := c.pubSub.Receive(ctx)
			if err != nil {
				if err == pool.ErrClosed {
					close(c.msgCh)
					return
				}
				if errCount > 0 {
					time.Sleep(100 * time.Millisecond)
				}
				errCount++
				continue
			}

			errCount = 0

			// Any message is as good as a ping.
			select {
			case c.ping <- struct{}{}:
			default:
			}

			switch msg := msg.(type) {
			case *Subscription:
				// Ignore.
			case *Pong:
				// Ignore.
			case *Message:
				timer.Reset(c.chanSendTimeout)
				select {
				case c.msgCh <- msg:
					if !timer.Stop() {
						<-timer.C
					}
				case <-timer.C:
					internal.Logger.Printf(
						ctx, "redis: %s channel is full for %s (message is dropped)",
						c, c.chanSendTimeout)
				}
			default:
				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
			}
		}
	}()
}

// initAllChan must be in sync with initMsgChan.
func (c *channel) initAllChan() {
	ctx := context.TODO()
	c.allCh = make(chan interface{}, c.chanSize)

	go func() {
		timer := time.NewTimer(time.Minute)
		timer.Stop()

		var errCount int
		for {
			msg, err := c.pubSub.Receive(ctx)
			if err != nil {
				if err == pool.ErrClosed {
					close(c.allCh)
					return
				}
				if errCount > 0 {
					time.Sleep(100 * time.Millisecond)
				}
				errCount++
				continue
			}

			errCount = 0

			// Any message is as good as a ping.
			select {
			case c.ping <- struct{}{}:
			default:
			}

			switch msg := msg.(type) {
			case *Pong:
				// Ignore.
			case *Subscription, *Message:
				timer.Reset(c.chanSendTimeout)
				select {
				case c.allCh <- msg:
					if !timer.Stop() {
						<-timer.C
					}
				case <-timer.C:
					internal.Logger.Printf(
						ctx, "redis: %s channel is full for %s (message is dropped)",
						c, c.chanSendTimeout)
				}
			default:
				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
			}
		}
	}()
}

@@ -0,0 +1,773 @@
package redis

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/go-redis/redis/v8/internal"
	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/proto"
)

// Nil is the reply returned by Redis when a key does not exist.
const Nil = proto.Nil

func SetLogger(logger internal.Logging) {
	internal.Logger = logger
}

//------------------------------------------------------------------------------

type Hook interface {
	BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
	AfterProcess(ctx context.Context, cmd Cmder) error

	BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
	AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
}
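
// A minimal Hook sketch (illustrative only; loggingHook is a made-up name,
// not part of the library):
//
//	type loggingHook struct{}
//
//	func (loggingHook) BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
//		return ctx, nil
//	}
//
//	func (loggingHook) AfterProcess(ctx context.Context, cmd Cmder) error {
//		fmt.Println("ran:", cmd.Name(), "err:", cmd.Err())
//		return nil
//	}
//
//	// BeforeProcessPipeline/AfterProcessPipeline are implemented analogously,
//	// then the hook is installed with rdb.AddHook(loggingHook{}).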

type hooks struct {
	hooks []Hook
}

func (hs *hooks) lock() {
	hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
}

func (hs hooks) clone() hooks {
	clone := hs
	clone.lock()
	return clone
}

func (hs *hooks) AddHook(hook Hook) {
	hs.hooks = append(hs.hooks, hook)
}

func (hs hooks) process(
	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
	if len(hs.hooks) == 0 {
		err := fn(ctx, cmd)
		cmd.SetErr(err)
		return err
	}

	var hookIndex int
	var retErr error

	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
		ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
		if retErr != nil {
			cmd.SetErr(retErr)
		}
	}

	if retErr == nil {
		retErr = fn(ctx, cmd)
		cmd.SetErr(retErr)
	}

	for hookIndex--; hookIndex >= 0; hookIndex-- {
		if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
			retErr = err
			cmd.SetErr(retErr)
		}
	}

	return retErr
}

func (hs hooks) processPipeline(
	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
	if len(hs.hooks) == 0 {
		err := fn(ctx, cmds)
		return err
	}

	var hookIndex int
	var retErr error

	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
		ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
		if retErr != nil {
			setCmdsErr(cmds, retErr)
		}
	}

	if retErr == nil {
		retErr = fn(ctx, cmds)
	}

	for hookIndex--; hookIndex >= 0; hookIndex-- {
		if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
			retErr = err
			setCmdsErr(cmds, retErr)
		}
	}

	return retErr
}

func (hs hooks) processTxPipeline(
	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
	cmds = wrapMultiExec(ctx, cmds)
	return hs.processPipeline(ctx, cmds, fn)
}

//------------------------------------------------------------------------------

type baseClient struct {
	opt      *Options
	connPool pool.Pooler

	onClose func() error // hook called when client is closed
}

func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
	return &baseClient{
		opt:      opt,
		connPool: connPool,
	}
}

func (c *baseClient) clone() *baseClient {
	clone := *c
	return &clone
}

func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
	opt := c.opt.clone()
	opt.ReadTimeout = timeout
	opt.WriteTimeout = timeout

	clone := c.clone()
	clone.opt = opt

	return clone
}

func (c *baseClient) String() string {
	return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
}

func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
	cn, err := c.connPool.NewConn(ctx)
	if err != nil {
		return nil, err
	}

	err = c.initConn(ctx, cn)
	if err != nil {
		_ = c.connPool.CloseConn(cn)
		return nil, err
	}

	return cn, nil
}

func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
	if c.opt.Limiter != nil {
		err := c.opt.Limiter.Allow()
		if err != nil {
			return nil, err
		}
	}

	cn, err := c._getConn(ctx)
	if err != nil {
		if c.opt.Limiter != nil {
			c.opt.Limiter.ReportResult(err)
		}
		return nil, err
	}

	return cn, nil
}

func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
	cn, err := c.connPool.Get(ctx)
	if err != nil {
		return nil, err
	}

	if cn.Inited {
		return cn, nil
	}

	if err := c.initConn(ctx, cn); err != nil {
		c.connPool.Remove(ctx, cn, err)
		if err := errors.Unwrap(err); err != nil {
			return nil, err
		}
		return nil, err
	}

	return cn, nil
}

func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
	if cn.Inited {
		return nil
	}
	cn.Inited = true

	if c.opt.Password == "" &&
		c.opt.DB == 0 &&
		!c.opt.readOnly &&
		c.opt.OnConnect == nil {
		return nil
	}

	connPool := pool.NewSingleConnPool(c.connPool, cn)
	conn := newConn(ctx, c.opt, connPool)

	_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
		if c.opt.Password != "" {
			if c.opt.Username != "" {
				pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
			} else {
				pipe.Auth(ctx, c.opt.Password)
			}
		}

		if c.opt.DB > 0 {
			pipe.Select(ctx, c.opt.DB)
		}

		if c.opt.readOnly {
			pipe.ReadOnly(ctx)
		}

		return nil
	})
	if err != nil {
		return err
	}

	if c.opt.OnConnect != nil {
		return c.opt.OnConnect(ctx, conn)
	}
	return nil
}

func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
	if c.opt.Limiter != nil {
		c.opt.Limiter.ReportResult(err)
	}

	if isBadConn(err, false, c.opt.Addr) {
		c.connPool.Remove(ctx, cn, err)
	} else {
		c.connPool.Put(ctx, cn)
	}
}

func (c *baseClient) withConn(
	ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
	cn, err := c.getConn(ctx)
	if err != nil {
		return err
	}

	defer func() {
		c.releaseConn(ctx, cn, err)
	}()

	done := ctx.Done() //nolint:ifshort

	if done == nil {
		err = fn(ctx, cn)
		return err
	}

	errc := make(chan error, 1)
	go func() { errc <- fn(ctx, cn) }()

	select {
	case <-done:
		_ = cn.Close()
		// Wait for the goroutine to finish and send something.
		<-errc

		err = ctx.Err()
		return err
	case err = <-errc:
		return err
	}
}

func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		attempt := attempt

		retry, err := c._process(ctx, cmd, attempt)
		if err == nil || !retry {
			return err
		}

		lastErr = err
	}
	return lastErr
}

func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
	if attempt > 0 {
		if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
			return false, err
		}
	}

	retryTimeout := uint32(1)
	err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
		err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
			return writeCmd(wr, cmd)
		})
		if err != nil {
			return err
		}

		err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
		if err != nil {
			if cmd.readTimeout() == nil {
				atomic.StoreUint32(&retryTimeout, 1)
			}
			return err
		}

		return nil
	})
	if err == nil {
		return false, nil
	}

	retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
	return retry, err
}

func (c *baseClient) retryBackoff(attempt int) time.Duration {
	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}

func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
	if timeout := cmd.readTimeout(); timeout != nil {
		t := *timeout
		if t == 0 {
			return 0
		}
		return t + 10*time.Second
	}
	return c.opt.ReadTimeout
}

// Close closes the client, releasing any open resources.
//
// It is rare to Close a Client, as the Client is meant to be
// long-lived and shared between many goroutines.
func (c *baseClient) Close() error {
	var firstErr error
	if c.onClose != nil {
		if err := c.onClose(); err != nil {
			firstErr = err
		}
	}
	if err := c.connPool.Close(); err != nil && firstErr == nil {
		firstErr = err
	}
	return firstErr
}

func (c *baseClient) getAddr() string {
	return c.opt.Addr
}

func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
}

func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
}

type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)

func (c *baseClient) generalProcessPipeline(
	ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
	err := c._generalProcessPipeline(ctx, cmds, p)
	if err != nil {
		setCmdsErr(cmds, err)
		return err
	}
	return cmdsFirstErr(cmds)
}

func (c *baseClient) _generalProcessPipeline(
	ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		if attempt > 0 {
			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
				return err
			}
		}

		var canRetry bool
		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
			var err error
			canRetry, err = p(ctx, cn, cmds)
			return err
		})
		if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
			return lastErr
		}
	}
	return lastErr
}

func (c *baseClient) pipelineProcessCmds(
	ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmds(wr, cmds)
	})
	if err != nil {
		return true, err
	}

	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
		return pipelineReadCmds(rd, cmds)
	})
	return true, err
}

func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
	for _, cmd := range cmds {
		err := cmd.readReply(rd)
		cmd.SetErr(err)
		if err != nil && !isRedisError(err) {
			return err
		}
	}
	return nil
}

func (c *baseClient) txPipelineProcessCmds(
	ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmds(wr, cmds)
	})
	if err != nil {
		return true, err
	}

	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
		statusCmd := cmds[0].(*StatusCmd)
		// Trim multi and exec.
		cmds = cmds[1 : len(cmds)-1]

		err := txPipelineReadQueued(rd, statusCmd, cmds)
		if err != nil {
			return err
		}

		return pipelineReadCmds(rd, cmds)
	})
	return false, err
}

func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
	if len(cmds) == 0 {
		panic("not reached")
	}
	cmdCopy := make([]Cmder, len(cmds)+2)
	cmdCopy[0] = NewStatusCmd(ctx, "multi")
	copy(cmdCopy[1:], cmds)
	cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
	return cmdCopy
}

func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
	// Parse queued replies.
	if err := statusCmd.readReply(rd); err != nil {
		return err
	}

	for range cmds {
		if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
			return err
		}
	}

	// Parse number of replies.
	line, err := rd.ReadLine()
	if err != nil {
		if err == Nil {
			err = TxFailedErr
		}
		return err
	}

	switch line[0] {
	case proto.ErrorReply:
		return proto.ParseErrorReply(line)
	case proto.ArrayReply:
		// ok
	default:
		err := fmt.Errorf("redis: expected '*', but got line %q", line)
		return err
	}

	return nil
}

//------------------------------------------------------------------------------

// Client is a Redis client representing a pool of zero or more
// underlying connections. It's safe for concurrent use by multiple
// goroutines.
type Client struct {
	*baseClient
	cmdable
	hooks
	ctx context.Context
}

// NewClient returns a client to the Redis Server specified by Options.
func NewClient(opt *Options) *Client {
	opt.init()

	c := Client{
		baseClient: newBaseClient(opt, newConnPool(opt)),
		ctx:        context.Background(),
	}
	c.cmdable = c.Process

	return &c
}

func (c *Client) clone() *Client {
	clone := *c
	clone.cmdable = clone.Process
	clone.hooks.lock()
	return &clone
}

func (c *Client) WithTimeout(timeout time.Duration) *Client {
	clone := c.clone()
	clone.baseClient = c.baseClient.withTimeout(timeout)
	return clone
}

func (c *Client) Context() context.Context {
	return c.ctx
}

func (c *Client) WithContext(ctx context.Context) *Client {
	if ctx == nil {
		panic("nil context")
	}
	clone := c.clone()
	clone.ctx = ctx
	return clone
}

func (c *Client) Conn(ctx context.Context) *Conn {
	return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
}

// Do creates a Cmd from the args and processes the cmd.
func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

func (c *Client) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}

func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}

// Options returns read-only Options that were used to create the client.
func (c *Client) Options() *Options {
	return c.opt
}

type PoolStats pool.Stats

// PoolStats returns connection pool stats.
func (c *Client) PoolStats() *PoolStats {
	stats := c.connPool.Stats()
	return (*PoolStats)(stats)
}

func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

func (c *Client) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processPipeline,
	}
	pipe.init()
	return &pipe
}

func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Client) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processTxPipeline,
	}
	pipe.init()
	return &pipe
}
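
// A minimal sketch (illustrative only): both INCRs below are queued and sent
// inside a single MULTI/EXEC block, so the server applies them atomically.
//
//	_, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
//		pipe.Incr(ctx, "a")
//		pipe.Incr(ctx, "b")
//		return nil
//	})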

func (c *Client) pubSub() *PubSub {
	pubsub := &PubSub{
		opt: c.opt,

		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
			return c.newConn(ctx)
		},
		closeConn: c.connPool.CloseConn,
	}
	pubsub.init()
	return pubsub
}

// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create an empty subscription.
// Note that this method does not wait on a response from Redis, so the
// subscription may not be active immediately. To force the connection to wait,
// you may call the Receive() method on the returned *PubSub like so:
//
//    sub := client.Subscribe(queryResp)
//    iface, err := sub.Receive()
//    if err != nil {
//        // handle error
//    }
//
//    // Should be *Subscription, but others are possible if other actions have been
//    // taken on sub since it was created.
//    switch iface.(type) {
//    case *Subscription:
//        // subscribe succeeded
//    case *Message:
//        // received first message
//    case *Pong:
//        // pong received
//    default:
//        // handle error
//    }
//
//    ch := sub.Channel()
func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.Subscribe(ctx, channels...)
	}
	return pubsub
}

// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create an empty subscription.
func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.PSubscribe(ctx, channels...)
	}
	return pubsub
}

//------------------------------------------------------------------------------

type conn struct {
	baseClient
	cmdable
	statefulCmdable
	hooks // TODO: inherit hooks
}

// Conn represents a single Redis connection rather than a pool of connections.
// Prefer running commands from Client unless there is a specific need
// for a continuous single Redis connection.
type Conn struct {
	*conn
	ctx context.Context
}

func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
	c := Conn{
		conn: &conn{
			baseClient: baseClient{
				opt:      opt,
				connPool: connPool,
			},
		},
		ctx: ctx,
	}
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
	return &c
}

func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}

func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}

func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

func (c *Conn) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processPipeline,
	}
	pipe.init()
	return &pipe
}

func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Conn) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processTxPipeline,
	}
	pipe.init()
	return &pipe
}

@@ -0,0 +1,180 @@
package redis

import "time"

// NewCmdResult returns a Cmd initialised with val and err for testing.
func NewCmdResult(val interface{}, err error) *Cmd {
	var cmd Cmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}
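
// These constructors let tests stub command results without a live server.
// A minimal sketch (illustrative only):
//
//	cmd := NewStringResult("PONG", nil)
//	fmt.Println(cmd.Val()) // prints "PONG"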

// NewSliceResult returns a SliceCmd initialised with val and err for testing.
func NewSliceResult(val []interface{}, err error) *SliceCmd {
	var cmd SliceCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewStatusResult returns a StatusCmd initialised with val and err for testing.
func NewStatusResult(val string, err error) *StatusCmd {
	var cmd StatusCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewIntResult returns an IntCmd initialised with val and err for testing.
func NewIntResult(val int64, err error) *IntCmd {
	var cmd IntCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewDurationResult returns a DurationCmd initialised with val and err for testing.
func NewDurationResult(val time.Duration, err error) *DurationCmd {
	var cmd DurationCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewBoolResult returns a BoolCmd initialised with val and err for testing.
func NewBoolResult(val bool, err error) *BoolCmd {
	var cmd BoolCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewStringResult returns a StringCmd initialised with val and err for testing.
func NewStringResult(val string, err error) *StringCmd {
	var cmd StringCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewFloatResult returns a FloatCmd initialised with val and err for testing.
func NewFloatResult(val float64, err error) *FloatCmd {
	var cmd FloatCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
func NewStringSliceResult(val []string, err error) *StringSliceCmd {
	var cmd StringSliceCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
	var cmd BoolSliceCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
	var cmd StringStringMapCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
	var cmd StringIntMapCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
	var cmd TimeCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
	var cmd ZSliceCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
	var cmd ZWithKeyCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
	var cmd ScanCmd
	cmd.page = keys
	cmd.cursor = cursor
	cmd.SetErr(err)
	return &cmd
}

// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
	var cmd ClusterSlotsCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
	var cmd GeoLocationCmd
	cmd.locations = val
	cmd.SetErr(err)
	return &cmd
}

// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
	var cmd GeoPosCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
	var cmd CommandsInfoCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewXMessageSliceCmdResult returns an XMessageSliceCmd initialised with val and err for testing.
func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
	var cmd XMessageSliceCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

// NewXStreamSliceCmdResult returns an XStreamSliceCmd initialised with val and err for testing.
func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
	var cmd XStreamSliceCmd
	cmd.val = val
	cmd.SetErr(err)
	return &cmd
}

@@ -0,0 +1,736 @@
package redis

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cespare/xxhash/v2"
	rendezvous "github.com/dgryski/go-rendezvous" //nolint

	"github.com/go-redis/redis/v8/internal"
	"github.com/go-redis/redis/v8/internal/hashtag"
	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/rand"
)

var errRingShardsDown = errors.New("redis: all ring shards are down")

//------------------------------------------------------------------------------

type ConsistentHash interface {
	Get(string) string
}

type rendezvousWrapper struct {
	*rendezvous.Rendezvous
}

func (w rendezvousWrapper) Get(key string) string {
	return w.Lookup(key)
}

func newRendezvous(shards []string) ConsistentHash {
	return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
}

//------------------------------------------------------------------------------

// RingOptions are used to configure a ring client and should be
// passed to NewRing.
type RingOptions struct {
	// Map of name => host:port addresses of ring shards.
	Addrs map[string]string

	// NewClient creates a shard client with provided name and options.
	NewClient func(name string, opt *Options) *Client

	// Frequency of PING commands sent to check shards availability.
	// A shard is considered down after 3 consecutive failed checks.
	HeartbeatFrequency time.Duration

	// NewConsistentHash returns a consistent hash that is used
	// to distribute keys across the shards.
	//
	// See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
	// for consistent hashing algorithmic tradeoffs.
	NewConsistentHash func(shards []string) ConsistentHash

	// The following options are copied from the Options struct.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username string
	Password string
	DB       int

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config
	Limiter   Limiter
}
|
||||
|
||||
func (opt *RingOptions) init() {
|
||||
if opt.NewClient == nil {
|
||||
opt.NewClient = func(name string, opt *Options) *Client {
|
||||
return NewClient(opt)
|
||||
}
|
||||
}
|
||||
|
||||
if opt.HeartbeatFrequency == 0 {
|
||||
opt.HeartbeatFrequency = 500 * time.Millisecond
|
||||
}
|
||||
|
||||
if opt.NewConsistentHash == nil {
|
||||
opt.NewConsistentHash = newRendezvous
|
||||
}
|
||||
|
||||
if opt.MaxRetries == -1 {
|
||||
opt.MaxRetries = 0
|
||||
} else if opt.MaxRetries == 0 {
|
||||
opt.MaxRetries = 3
|
||||
}
|
||||
switch opt.MinRetryBackoff {
|
||||
case -1:
|
||||
opt.MinRetryBackoff = 0
|
||||
case 0:
|
||||
opt.MinRetryBackoff = 8 * time.Millisecond
|
||||
}
|
||||
switch opt.MaxRetryBackoff {
|
||||
case -1:
|
||||
opt.MaxRetryBackoff = 0
|
||||
case 0:
|
||||
opt.MaxRetryBackoff = 512 * time.Millisecond
|
||||
}
|
||||
}
|
||||
|
||||
func (opt *RingOptions) clientOptions() *Options {
|
||||
return &Options{
|
||||
Dialer: opt.Dialer,
|
||||
OnConnect: opt.OnConnect,
|
||||
|
||||
Username: opt.Username,
|
||||
Password: opt.Password,
|
||||
DB: opt.DB,
|
||||
|
||||
MaxRetries: -1,
|
||||
|
||||
DialTimeout: opt.DialTimeout,
|
||||
ReadTimeout: opt.ReadTimeout,
|
||||
WriteTimeout: opt.WriteTimeout,
|
||||
|
||||
PoolFIFO: opt.PoolFIFO,
|
||||
PoolSize: opt.PoolSize,
|
||||
MinIdleConns: opt.MinIdleConns,
|
||||
MaxConnAge: opt.MaxConnAge,
|
||||
PoolTimeout: opt.PoolTimeout,
|
||||
IdleTimeout: opt.IdleTimeout,
|
||||
IdleCheckFrequency: opt.IdleCheckFrequency,
|
||||
|
||||
TLSConfig: opt.TLSConfig,
|
||||
Limiter: opt.Limiter,
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type ringShard struct {
|
||||
Client *Client
|
||||
down int32
|
||||
}
|
||||
|
||||
func newRingShard(opt *RingOptions, name, addr string) *ringShard {
|
||||
clopt := opt.clientOptions()
|
||||
clopt.Addr = addr
|
||||
|
||||
return &ringShard{
|
||||
Client: opt.NewClient(name, clopt),
|
||||
}
|
||||
}
|
||||
|
||||
func (shard *ringShard) String() string {
|
||||
var state string
|
||||
if shard.IsUp() {
|
||||
state = "up"
|
||||
} else {
|
||||
state = "down"
|
||||
}
|
||||
return fmt.Sprintf("%s is %s", shard.Client, state)
|
||||
}
|
||||
|
||||
func (shard *ringShard) IsDown() bool {
|
||||
const threshold = 3
|
||||
return atomic.LoadInt32(&shard.down) >= threshold
|
||||
}
|
||||
|
||||
func (shard *ringShard) IsUp() bool {
|
||||
return !shard.IsDown()
|
||||
}
|
||||
|
||||
// Vote votes to set shard state and returns true if state was changed.
|
||||
func (shard *ringShard) Vote(up bool) bool {
|
||||
if up {
|
||||
changed := shard.IsDown()
|
||||
atomic.StoreInt32(&shard.down, 0)
|
||||
return changed
|
||||
}
|
||||
|
||||
if shard.IsDown() {
|
||||
return false
|
||||
}
|
||||
|
||||
atomic.AddInt32(&shard.down, 1)
|
||||
return shard.IsDown()
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type ringShards struct {
|
||||
opt *RingOptions
|
||||
|
||||
mu sync.RWMutex
|
||||
hash ConsistentHash
|
||||
shards map[string]*ringShard // read only
|
||||
list []*ringShard // read only
|
||||
numShard int
|
||||
closed bool
|
||||
}
|
||||
|
||||
func newRingShards(opt *RingOptions) *ringShards {
|
||||
shards := make(map[string]*ringShard, len(opt.Addrs))
|
||||
list := make([]*ringShard, 0, len(shards))
|
||||
|
||||
for name, addr := range opt.Addrs {
|
||||
shard := newRingShard(opt, name, addr)
|
||||
shards[name] = shard
|
||||
|
||||
list = append(list, shard)
|
||||
}
|
||||
|
||||
c := &ringShards{
|
||||
opt: opt,
|
||||
|
||||
shards: shards,
|
||||
list: list,
|
||||
}
|
||||
c.rebalance()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *ringShards) List() []*ringShard {
|
||||
var list []*ringShard
|
||||
|
||||
c.mu.RLock()
|
||||
if !c.closed {
|
||||
list = c.list
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
return list
|
||||
}
|
||||
|
||||
func (c *ringShards) Hash(key string) string {
|
||||
key = hashtag.Key(key)
|
||||
|
||||
var hash string
|
||||
|
||||
c.mu.RLock()
|
||||
if c.numShard > 0 {
|
||||
hash = c.hash.Get(key)
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
return hash
|
||||
}
|
||||
|
||||
func (c *ringShards) GetByKey(key string) (*ringShard, error) {
|
||||
key = hashtag.Key(key)
|
||||
|
||||
c.mu.RLock()
|
||||
|
||||
if c.closed {
|
||||
c.mu.RUnlock()
|
||||
return nil, pool.ErrClosed
|
||||
}
|
||||
|
||||
if c.numShard == 0 {
|
||||
c.mu.RUnlock()
|
||||
return nil, errRingShardsDown
|
||||
}
|
||||
|
||||
hash := c.hash.Get(key)
|
||||
if hash == "" {
|
||||
c.mu.RUnlock()
|
||||
return nil, errRingShardsDown
|
||||
}
|
||||
|
||||
shard := c.shards[hash]
|
||||
c.mu.RUnlock()
|
||||
|
||||
return shard, nil
|
||||
}
|
||||
|
||||
func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
|
||||
if shardName == "" {
|
||||
return c.Random()
|
||||
}
|
||||
|
||||
c.mu.RLock()
|
||||
shard := c.shards[shardName]
|
||||
c.mu.RUnlock()
|
||||
return shard, nil
|
||||
}
|
||||
|
||||
func (c *ringShards) Random() (*ringShard, error) {
|
||||
return c.GetByKey(strconv.Itoa(rand.Int()))
|
||||
}
|
||||
|
||||
// heartbeat monitors state of each shard in the ring.
|
||||
func (c *ringShards) Heartbeat(frequency time.Duration) {
|
||||
ticker := time.NewTicker(frequency)
|
||||
defer ticker.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
for range ticker.C {
|
||||
var rebalance bool
|
||||
|
||||
for _, shard := range c.List() {
|
||||
err := shard.Client.Ping(ctx).Err()
|
||||
isUp := err == nil || err == pool.ErrPoolTimeout
|
||||
if shard.Vote(isUp) {
|
||||
internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
|
||||
rebalance = true
|
||||
}
|
||||
}
|
||||
|
||||
if rebalance {
|
||||
c.rebalance()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// rebalance removes dead shards from the Ring.
|
||||
func (c *ringShards) rebalance() {
|
||||
c.mu.RLock()
|
||||
shards := c.shards
|
||||
c.mu.RUnlock()
|
||||
|
||||
liveShards := make([]string, 0, len(shards))
|
||||
|
||||
for name, shard := range shards {
|
||||
if shard.IsUp() {
|
||||
liveShards = append(liveShards, name)
|
||||
}
|
||||
}
|
||||
|
||||
hash := c.opt.NewConsistentHash(liveShards)
|
||||
|
||||
c.mu.Lock()
|
||||
c.hash = hash
|
||||
c.numShard = len(liveShards)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *ringShards) Len() int {
|
||||
c.mu.RLock()
|
||||
l := c.numShard
|
||||
c.mu.RUnlock()
|
||||
return l
|
||||
}
|
||||
|
||||
func (c *ringShards) Close() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.closed {
|
||||
return nil
|
||||
}
|
||||
c.closed = true
|
||||
|
||||
var firstErr error
|
||||
for _, shard := range c.shards {
|
||||
if err := shard.Client.Close(); err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
c.hash = nil
|
||||
c.shards = nil
|
||||
c.list = nil
|
||||
|
||||
return firstErr
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type ring struct {
|
||||
opt *RingOptions
|
||||
shards *ringShards
|
||||
cmdsInfoCache *cmdsInfoCache //nolint:structcheck
|
||||
}
|
||||
|
||||
// Ring is a Redis client that uses consistent hashing to distribute
|
||||
// keys across multiple Redis servers (shards). It's safe for
|
||||
// concurrent use by multiple goroutines.
|
||||
//
|
||||
// Ring monitors the state of each shard and removes dead shards from
|
||||
// the ring. When a shard comes online it is added back to the ring. This
|
||||
// gives you maximum availability and partition tolerance, but no
|
||||
// consistency between different shards or even clients. Each client
|
||||
// uses shards that are available to the client and does not do any
|
||||
// coordination when shard state is changed.
|
||||
//
|
||||
// Ring should be used when you need multiple Redis servers for caching
|
||||
// and can tolerate losing data when one of the servers dies.
|
||||
// Otherwise you should use Redis Cluster.
|
||||
type Ring struct {
|
||||
*ring
|
||||
cmdable
|
||||
hooks
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func NewRing(opt *RingOptions) *Ring {
|
||||
opt.init()
|
||||
|
||||
ring := Ring{
|
||||
ring: &ring{
|
||||
opt: opt,
|
||||
shards: newRingShards(opt),
|
||||
},
|
||||
ctx: context.Background(),
|
||||
}
|
||||
|
||||
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
|
||||
ring.cmdable = ring.Process
|
||||
|
||||
go ring.shards.Heartbeat(opt.HeartbeatFrequency)
|
||||
|
||||
return &ring
|
||||
}
|
||||
|
||||
func (c *Ring) Context() context.Context {
|
||||
return c.ctx
|
||||
}
|
||||
|
||||
func (c *Ring) WithContext(ctx context.Context) *Ring {
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
clone := *c
|
||||
clone.cmdable = clone.Process
|
||||
clone.hooks.lock()
|
||||
clone.ctx = ctx
|
||||
return &clone
|
||||
}
|
||||
|
||||
// Do creates a Cmd from the args and processes the cmd.
|
||||
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
|
||||
cmd := NewCmd(ctx, args...)
|
||||
_ = c.Process(ctx, cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
|
||||
return c.hooks.process(ctx, cmd, c.process)
|
||||
}
|
||||
|
||||
// Options returns read-only Options that were used to create the client.
|
||||
func (c *Ring) Options() *RingOptions {
|
||||
return c.opt
|
||||
}
|
||||
|
||||
func (c *Ring) retryBackoff(attempt int) time.Duration {
|
||||
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
|
||||
}
|
||||
|
||||
// PoolStats returns accumulated connection pool stats.
|
||||
func (c *Ring) PoolStats() *PoolStats {
|
||||
shards := c.shards.List()
|
||||
var acc PoolStats
|
||||
for _, shard := range shards {
|
||||
s := shard.Client.connPool.Stats()
|
||||
acc.Hits += s.Hits
|
||||
acc.Misses += s.Misses
|
||||
acc.Timeouts += s.Timeouts
|
||||
acc.TotalConns += s.TotalConns
|
||||
acc.IdleConns += s.IdleConns
|
||||
}
|
||||
return &acc
|
||||
}
|
||||
|
||||
// Len returns the current number of shards in the ring.
|
||||
func (c *Ring) Len() int {
|
||||
return c.shards.Len()
|
||||
}
|
||||
|
||||
// Subscribe subscribes the client to the specified channels.
|
||||
func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
|
||||
if len(channels) == 0 {
|
||||
panic("at least one channel is required")
|
||||
}
|
||||
|
||||
shard, err := c.shards.GetByKey(channels[0])
|
||||
if err != nil {
|
||||
// TODO: return PubSub with sticky error
|
||||
panic(err)
|
||||
}
|
||||
return shard.Client.Subscribe(ctx, channels...)
|
||||
}
|
||||
|
||||
// PSubscribe subscribes the client to the given patterns.
|
||||
func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
|
||||
if len(channels) == 0 {
|
||||
panic("at least one channel is required")
|
||||
}
|
||||
|
||||
shard, err := c.shards.GetByKey(channels[0])
|
||||
if err != nil {
|
||||
// TODO: return PubSub with sticky error
|
||||
panic(err)
|
||||
}
|
||||
return shard.Client.PSubscribe(ctx, channels...)
|
||||
}
|
||||
|
||||
// ForEachShard concurrently calls the fn on each live shard in the ring.
|
||||
// It returns the first error if any.
|
||||
func (c *Ring) ForEachShard(
|
||||
ctx context.Context,
|
||||
fn func(ctx context.Context, client *Client) error,
|
||||
) error {
|
||||
shards := c.shards.List()
|
||||
var wg sync.WaitGroup
|
||||
errCh := make(chan error, 1)
|
||||
for _, shard := range shards {
|
||||
if shard.IsDown() {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(shard *ringShard) {
|
||||
defer wg.Done()
|
||||
err := fn(ctx, shard.Client)
|
||||
if err != nil {
|
||||
select {
|
||||
case errCh <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}(shard)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return err
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
|
||||
shards := c.shards.List()
|
||||
var firstErr error
|
||||
for _, shard := range shards {
|
||||
cmdsInfo, err := shard.Client.Command(ctx).Result()
|
||||
if err == nil {
|
||||
return cmdsInfo, nil
|
||||
}
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
if firstErr == nil {
|
||||
return nil, errRingShardsDown
|
||||
}
|
||||
return nil, firstErr
|
||||
}
|
||||
|
||||
func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
|
||||
cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
info := cmdsInfo[name]
|
||||
if info == nil {
|
||||
internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
|
||||
cmdInfo := c.cmdInfo(ctx, cmd.Name())
|
||||
pos := cmdFirstKeyPos(cmd, cmdInfo)
|
||||
if pos == 0 {
|
||||
return c.shards.Random()
|
||||
}
|
||||
firstKey := cmd.stringArg(pos)
|
||||
return c.shards.GetByKey(firstKey)
|
||||
}
|
||||
|
||||
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
|
||||
var lastErr error
|
||||
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
|
||||
if attempt > 0 {
|
||||
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
shard, err := c.cmdShard(ctx, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lastErr = shard.Client.Process(ctx, cmd)
|
||||
if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
|
||||
return lastErr
|
||||
}
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
|
||||
func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.Pipeline().Pipelined(ctx, fn)
|
||||
}
|
||||
|
||||
func (c *Ring) Pipeline() Pipeliner {
|
||||
pipe := Pipeline{
|
||||
ctx: c.ctx,
|
||||
exec: c.processPipeline,
|
||||
}
|
||||
pipe.init()
|
||||
return &pipe
|
||||
}
|
||||
|
||||
func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
|
||||
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
|
||||
return c.generalProcessPipeline(ctx, cmds, false)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.TxPipeline().Pipelined(ctx, fn)
|
||||
}
|
||||
|
||||
func (c *Ring) TxPipeline() Pipeliner {
|
||||
pipe := Pipeline{
|
||||
ctx: c.ctx,
|
||||
exec: c.processTxPipeline,
|
||||
}
|
||||
pipe.init()
|
||||
return &pipe
|
||||
}
|
||||
|
||||
func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
|
||||
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
|
||||
return c.generalProcessPipeline(ctx, cmds, true)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Ring) generalProcessPipeline(
|
||||
ctx context.Context, cmds []Cmder, tx bool,
|
||||
) error {
|
||||
cmdsMap := make(map[string][]Cmder)
|
||||
for _, cmd := range cmds {
|
||||
cmdInfo := c.cmdInfo(ctx, cmd.Name())
|
||||
hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
|
||||
if hash != "" {
|
||||
hash = c.shards.Hash(hash)
|
||||
}
|
||||
cmdsMap[hash] = append(cmdsMap[hash], cmd)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for hash, cmds := range cmdsMap {
|
||||
wg.Add(1)
|
||||
go func(hash string, cmds []Cmder) {
|
||||
defer wg.Done()
|
||||
|
||||
_ = c.processShardPipeline(ctx, hash, cmds, tx)
|
||||
}(hash, cmds)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return cmdsFirstErr(cmds)
|
||||
}
|
||||
|
||||
func (c *Ring) processShardPipeline(
|
||||
ctx context.Context, hash string, cmds []Cmder, tx bool,
|
||||
) error {
|
||||
// TODO: retry?
|
||||
shard, err := c.shards.GetByName(hash)
|
||||
if err != nil {
|
||||
setCmdsErr(cmds, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if tx {
|
||||
return shard.Client.processTxPipeline(ctx, cmds)
|
||||
}
|
||||
return shard.Client.processPipeline(ctx, cmds)
|
||||
}
|
||||
|
||||
func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
|
||||
if len(keys) == 0 {
|
||||
return fmt.Errorf("redis: Watch requires at least one key")
|
||||
}
|
||||
|
||||
var shards []*ringShard
|
||||
for _, key := range keys {
|
||||
if key != "" {
|
||||
shard, err := c.shards.GetByKey(hashtag.Key(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
shards = append(shards, shard)
|
||||
}
|
||||
}
|
||||
|
||||
if len(shards) == 0 {
|
||||
return fmt.Errorf("redis: Watch requires at least one shard")
|
||||
}
|
||||
|
||||
if len(shards) > 1 {
|
||||
for _, shard := range shards[1:] {
|
||||
if shard.Client != shards[0].Client {
|
||||
err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return shards[0].Client.Watch(ctx, fn, keys...)
|
||||
}
|
||||
|
||||
// Close closes the ring client, releasing any open resources.
|
||||
//
|
||||
// It is rare to Close a Ring, as the Ring is meant to be long-lived
|
||||
// and shared between many goroutines.
|
||||
func (c *Ring) Close() error {
|
||||
return c.shards.Close()
|
||||
}
|
||||
|
|
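For context when reviewing: a minimal sketch of how a Ring client is constructed and used. The shard names and addresses are hypothetical, not part of this commit; keys are routed to shards by the rendezvous hash above.

	package example

	import (
		"context"

		"github.com/go-redis/redis/v8"
	)

	func ringExample(ctx context.Context) error {
		// Two shards; keys are spread across them by consistent hashing.
		rdb := redis.NewRing(&redis.RingOptions{
			Addrs: map[string]string{
				"shard1": "localhost:6379", // hypothetical addresses
				"shard2": "localhost:6380",
			},
		})
		defer rdb.Close()

		// Set/Get route to whichever shard owns the key's hash.
		if err := rdb.Set(ctx, "response:abc", "der-bytes", 0).Err(); err != nil {
			return err
		}
		return rdb.Get(ctx, "response:abc").Err()
	}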
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -0,0 +1,65 @@
package redis

import (
	"context"
	"crypto/sha1"
	"encoding/hex"
	"io"
	"strings"
)

type Scripter interface {
	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
	ScriptLoad(ctx context.Context, script string) *StringCmd
}

var (
	_ Scripter = (*Client)(nil)
	_ Scripter = (*Ring)(nil)
	_ Scripter = (*ClusterClient)(nil)
)

type Script struct {
	src, hash string
}

func NewScript(src string) *Script {
	h := sha1.New()
	_, _ = io.WriteString(h, src)
	return &Script{
		src:  src,
		hash: hex.EncodeToString(h.Sum(nil)),
	}
}

func (s *Script) Hash() string {
	return s.hash
}

func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
	return c.ScriptLoad(ctx, s.src)
}

func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
	return c.ScriptExists(ctx, s.hash)
}

func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	return c.Eval(ctx, s.src, keys, args...)
}

func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	return c.EvalSha(ctx, s.hash, keys, args...)
}

// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	r := s.EvalSha(ctx, c, keys, args...)
	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
		return s.Eval(ctx, c, keys, args...)
	}
	return r
}
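A short sketch of the EVALSHA-then-EVAL flow that Run implements; the Lua script and key name here are illustrative only:

	package example

	import (
		"context"

		"github.com/go-redis/redis/v8"
	)

	// incrBy is a trivial Lua script used only to illustrate the flow.
	var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

	func runScript(ctx context.Context, rdb *redis.Client) (int64, error) {
		// Run tries EVALSHA first; on NOSCRIPT it falls back to EVAL,
		// which also loads the script into the server's script cache.
		return incrBy.Run(ctx, rdb, []string{"counter"}, 42).Int64()
	}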
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -0,0 +1,796 @@
package redis

import (
	"context"
	"crypto/tls"
	"errors"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/go-redis/redis/v8/internal"
	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/rand"
)

//------------------------------------------------------------------------------

// FailoverOptions are used to configure a failover client and should
// be passed to NewFailoverClient.
type FailoverOptions struct {
	// The master name.
	MasterName string
	// A seed list of host:port addresses of sentinel nodes.
	SentinelAddrs []string

	// If specified with SentinelPassword, enables ACL-based authentication (via
	// AUTH <user> <pass>).
	SentinelUsername string
	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
	// authentication.
	SentinelPassword string

	// Allows routing read-only commands to the closest master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteByLatency bool
	// Allows routing read-only commands to the random master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteRandomly bool

	// Route all commands to slave read-only nodes.
	SlaveOnly bool

	// Use slaves disconnected with master when cannot get connected slaves
	// Now, this option only works in RandomSlaveAddr function.
	UseDisconnectedSlaves bool

	// Following options are copied from Options struct.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username string
	Password string
	DB       int

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config
}

func (opt *FailoverOptions) clientOptions() *Options {
	return &Options{
		Addr: "FailoverClient",

		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       opt.DB,
		Username: opt.Username,
		Password: opt.Password,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
	return &Options{
		Addr: addr,

		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       0,
		Username: opt.SentinelUsername,
		Password: opt.SentinelPassword,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
	return &ClusterOptions{
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		Username: opt.Username,
		Password: opt.Password,

		MaxRedirects: opt.MaxRetries,

		RouteByLatency: opt.RouteByLatency,
		RouteRandomly:  opt.RouteRandomly,

		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolFIFO:           opt.PoolFIFO,
		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

// NewFailoverClient returns a Redis client that uses Redis Sentinel
// for automatic failover. It's safe for concurrent use by multiple
// goroutines.
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
	if failoverOpt.RouteByLatency {
		panic("to route commands by latency, use NewFailoverClusterClient")
	}
	if failoverOpt.RouteRandomly {
		panic("to route commands randomly, use NewFailoverClusterClient")
	}

	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)

	rand.Shuffle(len(sentinelAddrs), func(i, j int) {
		sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
	})

	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}

	opt := failoverOpt.clientOptions()
	opt.Dialer = masterSlaveDialer(failover)
	opt.init()

	connPool := newConnPool(opt)

	failover.mu.Lock()
	failover.onFailover = func(ctx context.Context, addr string) {
		_ = connPool.Filter(func(cn *pool.Conn) bool {
			return cn.RemoteAddr().String() != addr
		})
	}
	failover.mu.Unlock()

	c := Client{
		baseClient: newBaseClient(opt, connPool),
		ctx:        context.Background(),
	}
	c.cmdable = c.Process
	c.onClose = failover.Close

	return &c
}

func masterSlaveDialer(
	failover *sentinelFailover,
) func(ctx context.Context, network, addr string) (net.Conn, error) {
	return func(ctx context.Context, network, _ string) (net.Conn, error) {
		var addr string
		var err error

		if failover.opt.SlaveOnly {
			addr, err = failover.RandomSlaveAddr(ctx)
		} else {
			addr, err = failover.MasterAddr(ctx)
			if err == nil {
				failover.trySwitchMaster(ctx, addr)
			}
		}
		if err != nil {
			return nil, err
		}
		if failover.opt.Dialer != nil {
			return failover.opt.Dialer(ctx, network, addr)
		}

		netDialer := &net.Dialer{
			Timeout:   failover.opt.DialTimeout,
			KeepAlive: 5 * time.Minute,
		}
		if failover.opt.TLSConfig == nil {
			return netDialer.DialContext(ctx, network, addr)
		}
		return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
	}
}

//------------------------------------------------------------------------------

// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
	*baseClient
	hooks
	ctx context.Context
}

func NewSentinelClient(opt *Options) *SentinelClient {
	opt.init()
	c := &SentinelClient{
		baseClient: &baseClient{
			opt:      opt,
			connPool: newConnPool(opt),
		},
		ctx: context.Background(),
	}
	return c
}

func (c *SentinelClient) Context() context.Context {
	return c.ctx
}

func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
	if ctx == nil {
		panic("nil context")
	}
	clone := *c
	clone.ctx = ctx
	return &clone
}

func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

func (c *SentinelClient) pubSub() *PubSub {
	pubsub := &PubSub{
		opt: c.opt,

		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
			return c.newConn(ctx)
		},
		closeConn: c.connPool.CloseConn,
	}
	pubsub.init()
	return pubsub
}

// Ping is used to test if a connection is still alive, or to
// measure latency.
func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
	cmd := NewStringCmd(ctx, "ping")
	_ = c.Process(ctx, cmd)
	return cmd
}

// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.Subscribe(ctx, channels...)
	}
	return pubsub
}

// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create empty subscription.
func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.PSubscribe(ctx, channels...)
	}
	return pubsub
}

func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
	cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Failover forces a failover as if the master was not reachable, and without
// asking for agreement to other Sentinels.
func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
	cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Reset resets all the masters with matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
// (including a failover in progress), and removes every slave and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
	cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
	_ = c.Process(ctx, cmd)
	return cmd
}

// FlushConfig forces Sentinel to rewrite its configuration on disk, including
// the current Sentinel state.
func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
	cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
	_ = c.Process(ctx, cmd)
	return cmd
}

// Master shows the state and info of the specified master.
func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
	cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Masters shows a list of monitored masters and their state.
func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "masters")
	_ = c.Process(ctx, cmd)
	return cmd
}

// Slaves shows a list of slaves for the specified master and their state.
func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// CkQuorum checks if the current Sentinel configuration is able to reach the
// quorum needed to failover a master, and the majority needed to authorize the
// failover. This command should be used in monitoring systems to check if a
// Sentinel deployment is ok.
func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Monitor tells the Sentinel to start monitoring a new master with the specified
// name, ip, port, and quorum.
func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Set is used in order to change configuration parameters of a specific master.
func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Remove is used in order to remove the specified master: the master will no
// longer be monitored, and will totally be removed from the internal state of
// the Sentinel.
func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "remove", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

//------------------------------------------------------------------------------

type sentinelFailover struct {
	opt *FailoverOptions

	sentinelAddrs []string

	onFailover func(ctx context.Context, addr string)
	onUpdate   func(ctx context.Context)

	mu          sync.RWMutex
	_masterAddr string
	sentinel    *SentinelClient
	pubsub      *PubSub
}

func (c *sentinelFailover) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.sentinel != nil {
		return c.closeSentinel()
	}
	return nil
}

func (c *sentinelFailover) closeSentinel() error {
	firstErr := c.pubsub.Close()
	c.pubsub = nil

	err := c.sentinel.Close()
	if err != nil && firstErr == nil {
		firstErr = err
	}
	c.sentinel = nil

	return firstErr
}

func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
	if c.opt == nil {
		return "", errors.New("opt is nil")
	}

	addresses, err := c.slaveAddrs(ctx, false)
	if err != nil {
		return "", err
	}

	if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
		addresses, err = c.slaveAddrs(ctx, true)
		if err != nil {
			return "", err
		}
	}

	if len(addresses) == 0 {
		return c.MasterAddr(ctx)
	}
	return addresses[rand.Intn(len(addresses))], nil
}

func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addr := c.getMasterAddr(ctx, sentinel)
		if addr != "" {
			return addr, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.sentinel != nil {
		addr := c.getMasterAddr(ctx, c.sentinel)
		if addr != "" {
			return addr, nil
		}
		_ = c.closeSentinel()
	}

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))

		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)

		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		return addr, nil
	}

	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}

func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, c.sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
		_ = c.closeSentinel()
	}

	var sentinelReachable bool

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))

		slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}
		sentinelReachable = true
		addrs := parseSlaveAddrs(slaves, useDisconnected)
		if len(addrs) == 0 {
			continue
		}
		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)

		return addrs, nil
	}

	if sentinelReachable {
		return []string{}, nil
	}
	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}

func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
	addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
			c.opt.MasterName, err)
		return ""
	}
	return net.JoinHostPort(addr[0], addr[1])
}

func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
	addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
			c.opt.MasterName, err)
		return []string{}
	}
	return parseSlaveAddrs(addrs, false)
}

func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
	nodes := make([]string, 0, len(addrs))
	for _, node := range addrs {
		ip := ""
		port := ""
		flags := []string{}
		lastkey := ""
		isDown := false

		for _, key := range node.([]interface{}) {
			switch lastkey {
			case "ip":
				ip = key.(string)
			case "port":
				port = key.(string)
			case "flags":
				flags = strings.Split(key.(string), ",")
			}
			lastkey = key.(string)
		}

		for _, flag := range flags {
			switch flag {
			case "s_down", "o_down":
				isDown = true
			case "disconnected":
				if !keepDisconnected {
					isDown = true
				}
			}
		}

		if !isDown {
			nodes = append(nodes, net.JoinHostPort(ip, port))
		}
	}

	return nodes
}

func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
	c.mu.RLock()
	currentAddr := c._masterAddr //nolint:ifshort
	c.mu.RUnlock()

	if addr == currentAddr {
		return
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if addr == c._masterAddr {
		return
	}
	c._masterAddr = addr

	internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
		c.opt.MasterName, addr)
	if c.onFailover != nil {
		c.onFailover(ctx, addr)
	}
}

func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
	if c.sentinel != nil {
		panic("not reached")
	}
	c.sentinel = sentinel
	c.discoverSentinels(ctx)

	c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
	go c.listen(c.pubsub)
}

func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
	sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
		return
	}
	for _, sentinel := range sentinels {
		vals := sentinel.([]interface{})
		var ip, port string
		for i := 0; i < len(vals); i += 2 {
			key := vals[i].(string)
			switch key {
			case "ip":
				ip = vals[i+1].(string)
			case "port":
				port = vals[i+1].(string)
			}
		}
		if ip != "" && port != "" {
			sentinelAddr := net.JoinHostPort(ip, port)
			if !contains(c.sentinelAddrs, sentinelAddr) {
				internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
					sentinelAddr, c.opt.MasterName)
				c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
			}
		}
	}
}

func (c *sentinelFailover) listen(pubsub *PubSub) {
	ctx := context.TODO()

	if c.onUpdate != nil {
		c.onUpdate(ctx)
	}

	ch := pubsub.Channel()
	for msg := range ch {
		if msg.Channel == "+switch-master" {
			parts := strings.Split(msg.Payload, " ")
			if parts[0] != c.opt.MasterName {
				internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
				continue
			}
			addr := net.JoinHostPort(parts[3], parts[4])
			c.trySwitchMaster(pubsub.getContext(), addr)
		}

		if c.onUpdate != nil {
			c.onUpdate(ctx)
		}
	}
}

func contains(slice []string, str string) bool {
	for _, s := range slice {
		if s == str {
			return true
		}
	}
	return false
}

//------------------------------------------------------------------------------

// NewFailoverClusterClient returns a client that supports routing read-only commands
// to a slave node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)

	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}

	opt := failoverOpt.clusterOptions()
	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
		masterAddr, err := failover.MasterAddr(ctx)
		if err != nil {
			return nil, err
		}

		nodes := []ClusterNode{{
			Addr: masterAddr,
		}}

		slaveAddrs, err := failover.slaveAddrs(ctx, false)
		if err != nil {
			return nil, err
		}

		for _, slaveAddr := range slaveAddrs {
			nodes = append(nodes, ClusterNode{
				Addr: slaveAddr,
			})
		}

		slots := []ClusterSlot{
			{
				Start: 0,
				End:   16383,
				Nodes: nodes,
			},
		}
		return slots, nil
	}

	c := NewClusterClient(opt)

	failover.mu.Lock()
	failover.onUpdate = func(ctx context.Context) {
		c.ReloadState(ctx)
	}
	failover.mu.Unlock()

	return c
}
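A minimal sketch of constructing a sentinel-backed client from this file. The client asks the sentinels for the current master and reconnects when a +switch-master event fires; the master name and sentinel addresses below are placeholders, not part of this commit:

	package example

	import (
		"context"

		"github.com/go-redis/redis/v8"
	)

	func failoverExample(ctx context.Context) error {
		rdb := redis.NewFailoverClient(&redis.FailoverOptions{
			MasterName:    "mymaster", // hypothetical sentinel configuration
			SentinelAddrs: []string{"localhost:26379", "localhost:26380"},
		})
		defer rdb.Close()

		// Commands are routed to whichever node the sentinels report as master.
		return rdb.Ping(ctx).Err()
	}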
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -0,0 +1,149 @@
package redis

import (
	"context"

	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/proto"
)

// TxFailedErr transaction redis failed.
const TxFailedErr = proto.RedisError("redis: transaction failed")

// Tx implements Redis transactions as described in
// http://redis.io/topics/transactions. It's NOT safe for concurrent use
// by multiple goroutines, because Exec resets list of watched keys.
//
// If you don't need WATCH, use Pipeline instead.
type Tx struct {
	baseClient
	cmdable
	statefulCmdable
	hooks
	ctx context.Context
}

func (c *Client) newTx(ctx context.Context) *Tx {
	tx := Tx{
		baseClient: baseClient{
			opt:      c.opt,
			connPool: pool.NewStickyConnPool(c.connPool),
		},
		hooks: c.hooks.clone(),
		ctx:   ctx,
	}
	tx.init()
	return &tx
}

func (c *Tx) init() {
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
}

func (c *Tx) Context() context.Context {
	return c.ctx
}

func (c *Tx) WithContext(ctx context.Context) *Tx {
	if ctx == nil {
		panic("nil context")
	}
	clone := *c
	clone.init()
	clone.hooks.lock()
	clone.ctx = ctx
	return &clone
}

func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

// Watch prepares a transaction and marks the keys to be watched
// for conditional execution if there are any keys.
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
	tx := c.newTx(ctx)
	defer tx.Close(ctx)
	if len(keys) > 0 {
		if err := tx.Watch(ctx, keys...).Err(); err != nil {
			return err
		}
	}
	return fn(tx)
}

// Close closes the transaction, releasing any open resources.
func (c *Tx) Close(ctx context.Context) error {
	_ = c.Unwatch(ctx).Err()
	return c.baseClient.Close()
}

// Watch marks the keys to be watched for conditional execution
// of a transaction.
func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
	args := make([]interface{}, 1+len(keys))
	args[0] = "watch"
	for i, key := range keys {
		args[1+i] = key
	}
	cmd := NewStatusCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Unwatch flushes all the previously watched keys for a transaction.
func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
	args := make([]interface{}, 1+len(keys))
	args[0] = "unwatch"
	for i, key := range keys {
		args[1+i] = key
	}
	cmd := NewStatusCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx: c.ctx,
		exec: func(ctx context.Context, cmds []Cmder) error {
			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
		},
	}
	pipe.init()
	return &pipe
}

// Pipelined executes commands queued in the fn outside of the transaction.
// Use TxPipelined if you need transactional behavior.
func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

// TxPipelined executes commands queued in the fn in the transaction.
//
// When using WATCH, EXEC will execute commands only if the watched keys
// were not modified, allowing for a check-and-set mechanism.
//
// Exec always returns list of commands. If transaction fails
// TxFailedErr is returned. Otherwise Exec returns an error of the first
// failed command or nil.
func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
func (c *Tx) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx: c.ctx,
		exec: func(ctx context.Context, cmds []Cmder) error {
			return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
		},
	}
	pipe.init()
	return &pipe
}
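Watch plus TxPipelined gives the check-and-set pattern the TxPipelined comment describes: read under WATCH, then MULTI/EXEC, retrying if the key changed in between. A sketch along the lines of the upstream documentation (the key name and retry count are illustrative):

	package example

	import (
		"context"
		"strconv"

		"github.com/go-redis/redis/v8"
	)

	func increment(ctx context.Context, rdb *redis.Client, key string) error {
		txf := func(tx *redis.Tx) error {
			// Read the current value; the key is being WATCHed.
			n, err := tx.Get(ctx, key).Int()
			if err != nil && err != redis.Nil {
				return err
			}

			// EXEC only runs if the watched key was not modified.
			_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
				pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
				return nil
			})
			return err
		}

		for i := 0; i < 3; i++ { // bounded retries
			err := rdb.Watch(ctx, txf, key)
			if err == nil {
				return nil
			}
			if err == redis.TxFailedErr {
				continue // another client touched the key; retry
			}
			return err
		}
		return redis.TxFailedErr
	}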
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -0,0 +1,213 @@
package redis

import (
	"context"
	"crypto/tls"
	"net"
	"time"
)

// UniversalOptions information is required by UniversalClient to establish
// connections.
type UniversalOptions struct {
	// Either a single address or a seed list of host:port addresses
	// of cluster/sentinel nodes.
	Addrs []string

	// Database to be selected after connecting to the server.
	// Only single-node and failover clients.
	DB int

	// Common options.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username         string
	Password         string
	SentinelPassword string

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config

	// Only cluster clients.

	MaxRedirects   int
	ReadOnly       bool
	RouteByLatency bool
	RouteRandomly  bool

	// The sentinel master name.
	// Only failover clients.

	MasterName string
}

// Cluster returns cluster options created from the universal options.
func (o *UniversalOptions) Cluster() *ClusterOptions {
	if len(o.Addrs) == 0 {
		o.Addrs = []string{"127.0.0.1:6379"}
	}

	return &ClusterOptions{
		Addrs:     o.Addrs,
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		Username: o.Username,
		Password: o.Password,

		MaxRedirects:   o.MaxRedirects,
		ReadOnly:       o.ReadOnly,
		RouteByLatency: o.RouteByLatency,
		RouteRandomly:  o.RouteRandomly,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,

		DialTimeout:        o.DialTimeout,
		ReadTimeout:        o.ReadTimeout,
		WriteTimeout:       o.WriteTimeout,
		PoolFIFO:           o.PoolFIFO,
		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,

		TLSConfig: o.TLSConfig,
	}
}

// Failover returns failover options created from the universal options.
func (o *UniversalOptions) Failover() *FailoverOptions {
	if len(o.Addrs) == 0 {
		o.Addrs = []string{"127.0.0.1:26379"}
	}

	return &FailoverOptions{
		SentinelAddrs: o.Addrs,
		MasterName:    o.MasterName,

		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		DB:               o.DB,
		Username:         o.Username,
		Password:         o.Password,
		SentinelPassword: o.SentinelPassword,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,

		DialTimeout:  o.DialTimeout,
		ReadTimeout:  o.ReadTimeout,
		WriteTimeout: o.WriteTimeout,

		PoolFIFO:           o.PoolFIFO,
		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,

		TLSConfig: o.TLSConfig,
	}
}

// Simple returns basic options created from the universal options.
func (o *UniversalOptions) Simple() *Options {
	addr := "127.0.0.1:6379"
	if len(o.Addrs) > 0 {
		addr = o.Addrs[0]
	}

	return &Options{
		Addr:      addr,
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		DB:       o.DB,
		Username: o.Username,
		Password: o.Password,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,

		DialTimeout:  o.DialTimeout,
		ReadTimeout:  o.ReadTimeout,
		WriteTimeout: o.WriteTimeout,

		PoolFIFO:           o.PoolFIFO,
		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,

		TLSConfig: o.TLSConfig,
	}
}

// --------------------------------------------------------------------

// UniversalClient is an abstract client which - based on the provided options -
// represents either a ClusterClient, a FailoverClient, or a single-node Client.
// This can be useful for testing cluster-specific applications locally or having different
// clients in different environments.
type UniversalClient interface {
	Cmdable
	Context() context.Context
	AddHook(Hook)
	Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	Subscribe(ctx context.Context, channels ...string) *PubSub
	PSubscribe(ctx context.Context, channels ...string) *PubSub
	Close() error
	PoolStats() *PoolStats
}

var (
	_ UniversalClient = (*Client)(nil)
	_ UniversalClient = (*ClusterClient)(nil)
	_ UniversalClient = (*Ring)(nil)
)

// NewUniversalClient returns a new multi client. The type of the returned client depends
// on the following conditions:
//
// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
// 2. if the number of Addrs is two or more, a ClusterClient is returned.
// 3. Otherwise, a single-node Client is returned.
func NewUniversalClient(opts *UniversalOptions) UniversalClient {
	if opts.MasterName != "" {
		return NewFailoverClient(opts.Failover())
	} else if len(opts.Addrs) > 1 {
		return NewClusterClient(opts.Cluster())
	}
	return NewClient(opts.Simple())
}
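A short sketch of the three dispatch cases in NewUniversalClient (addresses and master name are placeholders):

	package example

	import "github.com/go-redis/redis/v8"

	func newClients() {
		// One address, no MasterName: a single-node *redis.Client.
		_ = redis.NewUniversalClient(&redis.UniversalOptions{
			Addrs: []string{"localhost:6379"},
		})

		// Several addresses: a *redis.ClusterClient.
		_ = redis.NewUniversalClient(&redis.UniversalOptions{
			Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
		})

		// MasterName set: a sentinel-backed failover client.
		_ = redis.NewUniversalClient(&redis.UniversalOptions{
			MasterName: "mymaster",
			Addrs:      []string{"localhost:26379"},
		})
	}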
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/version.go
@@ -0,0 +1,6 @@
package redis

// Version is the current release version.
func Version() string {
	return "8.11.4"
}
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -2,8 +2,10 @@
 github.com/beeker1121/goque
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
-# github.com/cespare/xxhash/v2 v2.1.1
+# github.com/cespare/xxhash/v2 v2.1.2
 github.com/cespare/xxhash/v2
+# github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+github.com/dgryski/go-rendezvous
 # github.com/eggsampler/acme/v3 v3.0.0
 github.com/eggsampler/acme/v3
 # github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
@@ -16,9 +18,18 @@ github.com/facebookgo/muster
 github.com/felixge/httpsnoop
 # github.com/go-gorp/gorp/v3 v3.0.2
 github.com/go-gorp/gorp/v3
+# github.com/go-redis/redis/v8 v8.11.4
+github.com/go-redis/redis/v8
+github.com/go-redis/redis/v8/internal
+github.com/go-redis/redis/v8/internal/hashtag
+github.com/go-redis/redis/v8/internal/hscan
+github.com/go-redis/redis/v8/internal/pool
+github.com/go-redis/redis/v8/internal/proto
+github.com/go-redis/redis/v8/internal/rand
+github.com/go-redis/redis/v8/internal/util
 # github.com/go-sql-driver/mysql v1.5.0
 github.com/go-sql-driver/mysql
-# github.com/golang/protobuf v1.5.0
+# github.com/golang/protobuf v1.5.2
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any