* add redis queue * finished indexer redis queue * add redis vendor * fix vet * Update docs/content/doc/advanced/config-cheat-sheet.en-us.md Co-Authored-By: lunny <xiaolunwen@gmail.com> * switch to go mod * Update required changes for new logging func signatures
| @@ -264,6 +264,8 @@ ISSUE_INDEXER_QUEUE_TYPE = levelqueue | |||
| ; When ISSUE_INDEXER_QUEUE_TYPE is levelqueue, this will be the path where the queue will be saved, | |||
| ; default is indexers/issues.queue | |||
| ISSUE_INDEXER_QUEUE_DIR = indexers/issues.queue | |||
| ; When `ISSUE_INDEXER_QUEUE_TYPE` is `redis`, this will store the redis connection string. | |||
| ISSUE_INDEXER_QUEUE_CONN_STR = "addrs=127.0.0.1:6379 db=0" | |||
| ; Batch queue number, default is 20 | |||
| ISSUE_INDEXER_QUEUE_BATCH_NUMBER = 20 | |||
| @@ -158,9 +158,10 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`. | |||
| - `ISSUE_INDEXER_TYPE`: **bleve**: Issue indexer type, currently supported: `bleve` or `db`; if `db` is used, the other issue indexer settings below are ignored. | |||
| - `ISSUE_INDEXER_PATH`: **indexers/issues.bleve**: Index file used for issue search. | |||
| - `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: Issue indexer queue, currently support: channel or levelqueue | |||
| - `ISSUE_INDEXER_QUEUE_DIR`: **indexers/issues.queue**: When ISSUE_INDEXER_QUEUE_TYPE is levelqueue, this will be the queue will be saved path | |||
| - `ISSUE_INDEXER_QUEUE_BATCH_NUMBER`: **20**: Batch queue number | |||
| - `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: Issue indexer queue, currently supports: `channel`, `levelqueue`, `redis`. | |||
| - `ISSUE_INDEXER_QUEUE_DIR`: **indexers/issues.queue**: When `ISSUE_INDEXER_QUEUE_TYPE` is `levelqueue`, this will be the path where the queue will be saved. | |||
| - `ISSUE_INDEXER_QUEUE_CONN_STR`: **addrs=127.0.0.1:6379 db=0**: When `ISSUE_INDEXER_QUEUE_TYPE` is `redis`, this will store the redis connection string. | |||
| - `ISSUE_INDEXER_QUEUE_BATCH_NUMBER`: **20**: Batch queue number. | |||
| - `REPO_INDEXER_ENABLED`: **false**: Enables code search (uses a lot of disk space, about 6 times more than the repository size). | |||
| - `REPO_INDEXER_PATH`: **indexers/repos.bleve**: Index file used for code search. | |||
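For reference, a minimal `app.ini` sketch that switches the issue indexer queue to Redis, assembled from the options above (the host, port, and db index are placeholders):

```ini
[indexer]
ISSUE_INDEXER_TYPE = bleve
; switch the queue backend from the default levelqueue to redis
ISSUE_INDEXER_QUEUE_TYPE = redis
; space-separated key=value pairs; addrs takes a comma-separated list for cluster mode
ISSUE_INDEXER_QUEUE_CONN_STR = addrs=127.0.0.1:6379 db=0
ISSUE_INDEXER_QUEUE_BATCH_NUMBER = 20
```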
| @@ -82,13 +82,13 @@ menu: | |||
| - `PATH`: Path where the Tidb or SQLite3 data file is stored. | |||
| - `LOG_SQL`: **true**: Log the generated SQL; defaults to true. | |||
| ## Indexer (`indexer`) | |||
| - `ISSUE_INDEXER_TYPE`: **bleve**: Issue indexer type, currently supports `bleve` or `db`; when set to `db`, the other issue indexer settings can be left unset. | |||
| - `ISSUE_INDEXER_PATH`: **indexers/issues.bleve**: Path where the issue index file is stored; effective when the indexer type is `bleve`. | |||
| - `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: Issue indexer queue type, currently supports `channel` or `levelqueue`. | |||
| - `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: Issue indexer queue type, currently supports `channel`, `levelqueue`, or `redis`. | |||
| - `ISSUE_INDEXER_QUEUE_DIR`: **indexers/issues.queue**: When `ISSUE_INDEXER_QUEUE_TYPE` is `levelqueue`, the on-disk path where the index queue is saved. | |||
| - `ISSUE_INDEXER_QUEUE_CONN_STR`: **addrs=127.0.0.1:6379 db=0**: When `ISSUE_INDEXER_QUEUE_TYPE` is `redis`, the connection string for the Redis queue. | |||
| - `ISSUE_INDEXER_QUEUE_BATCH_NUMBER`: **20**: Number of items submitted per batch during queue processing. | |||
| - `REPO_INDEXER_ENABLED`: **false**: Whether to enable code search (uses a considerable amount of disk space when enabled). | |||
| @@ -54,6 +54,7 @@ require ( | |||
| github.com/go-macaron/inject v0.0.0-20160627170012-d8a0b8677191 | |||
| github.com/go-macaron/session v0.0.0-20190131233854-0a0a789bf193 | |||
| github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90 | |||
| github.com/go-redis/redis v6.15.2+incompatible | |||
| github.com/go-sql-driver/mysql v1.4.0 | |||
| github.com/go-xorm/builder v0.3.3 | |||
| github.com/go-xorm/core v0.6.0 | |||
| @@ -116,6 +116,8 @@ github.com/go-macaron/session v0.0.0-20190131233854-0a0a789bf193 h1:z/nqwd+ql/r6 | |||
| github.com/go-macaron/session v0.0.0-20190131233854-0a0a789bf193/go.mod h1:ScEJm9Gk+ez5JJTml5WlBIqavAfuE5nF8e4Gvyz/X+A= | |||
| github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90 h1:3wYKrRg9IjUMfaf3H0Hh7M5Li9ge79Y7aw2yujHa2jQ= | |||
| github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90/go.mod h1:Ut/NmkIMGVYlEdJBzEZgWVWG5ZpYS9BLmUgXfAgi+qM= | |||
| github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4= | |||
| github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= | |||
| github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f h1:fbIzwEaXt5b2bl9mm+PIufKTSGKk6ZuwSSTQ7iZj7Lo= | |||
| github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= | |||
| github.com/go-xorm/builder v0.3.2/go.mod h1:v8mE3MFBgtL+RGFNfUnAMUqqfk/Y4W5KuwCFQIEpQLk= | |||
| @@ -46,9 +46,9 @@ type Indexer interface { | |||
| } | |||
| var ( | |||
| // issueIndexerUpdateQueue queue of issue ids to be updated | |||
| issueIndexerUpdateQueue Queue | |||
| issueIndexer Indexer | |||
| // issueIndexerQueue queue of issue ids to be updated | |||
| issueIndexerQueue Queue | |||
| issueIndexer Indexer | |||
| ) | |||
| // InitIssueIndexer initializes the issue indexer; if syncReindex is true then reindex until | |||
| @@ -72,27 +72,36 @@ func InitIssueIndexer(syncReindex bool) error { | |||
| } | |||
| if dummyQueue { | |||
| issueIndexerUpdateQueue = &DummyQueue{} | |||
| issueIndexerQueue = &DummyQueue{} | |||
| return nil | |||
| } | |||
| var err error | |||
| switch setting.Indexer.IssueIndexerQueueType { | |||
| switch setting.Indexer.IssueQueueType { | |||
| case setting.LevelQueueType: | |||
| issueIndexerUpdateQueue, err = NewLevelQueue( | |||
| issueIndexerQueue, err = NewLevelQueue( | |||
| issueIndexer, | |||
| setting.Indexer.IssueIndexerQueueDir, | |||
| setting.Indexer.IssueIndexerQueueBatchNumber) | |||
| setting.Indexer.IssueQueueDir, | |||
| setting.Indexer.IssueQueueBatchNumber) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| case setting.ChannelQueueType: | |||
| issueIndexerUpdateQueue = NewChannelQueue(issueIndexer, setting.Indexer.IssueIndexerQueueBatchNumber) | |||
| issueIndexerQueue = NewChannelQueue(issueIndexer, setting.Indexer.IssueQueueBatchNumber) | |||
| case setting.RedisQueueType: | |||
| addrs, pass, idx, err := parseConnStr(setting.Indexer.IssueQueueConnStr) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| issueIndexerQueue, err = NewRedisQueue(addrs, pass, idx, issueIndexer, setting.Indexer.IssueQueueBatchNumber) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| default: | |||
| return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueIndexerQueueType) | |||
| return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueQueueType) | |||
| } | |||
| go issueIndexerUpdateQueue.Run() | |||
| go issueIndexerQueue.Run() | |||
| if populate { | |||
| if syncReindex { | |||
| @@ -152,7 +161,7 @@ func UpdateIssueIndexer(issue *models.Issue) { | |||
| comments = append(comments, comment.Content) | |||
| } | |||
| } | |||
| issueIndexerUpdateQueue.Push(&IndexerData{ | |||
| issueIndexerQueue.Push(&IndexerData{ | |||
| ID: issue.ID, | |||
| RepoID: issue.RepoID, | |||
| Title: issue.Title, | |||
| @@ -174,7 +183,7 @@ func DeleteRepoIssueIndexer(repo *models.Repository) { | |||
| return | |||
| } | |||
| issueIndexerUpdateQueue.Push(&IndexerData{ | |||
| issueIndexerQueue.Push(&IndexerData{ | |||
| IDs: ids, | |||
| IsDelete: true, | |||
| }) | |||
| @@ -29,7 +29,7 @@ func TestMain(m *testing.M) { | |||
| func TestBleveSearchIssues(t *testing.T) { | |||
| assert.NoError(t, models.PrepareTestDatabase()) | |||
| os.RemoveAll(setting.Indexer.IssueIndexerQueueDir) | |||
| os.RemoveAll(setting.Indexer.IssueQueueDir) | |||
| os.RemoveAll(setting.Indexer.IssuePath) | |||
| setting.Indexer.IssueType = "bleve" | |||
| if err := InitIssueIndexer(true); err != nil { | |||
| @@ -0,0 +1,145 @@ | |||
| // Copyright 2019 The Gitea Authors. All rights reserved. | |||
| // Use of this source code is governed by a MIT-style | |||
| // license that can be found in the LICENSE file. | |||
| package issues | |||
| import ( | |||
| "encoding/json" | |||
| "errors" | |||
| "strconv" | |||
| "strings" | |||
| "time" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "github.com/go-redis/redis" | |||
| ) | |||
| var ( | |||
| _ Queue = &RedisQueue{} | |||
| ) | |||
| type redisClient interface { | |||
| RPush(key string, args ...interface{}) *redis.IntCmd | |||
| LPop(key string) *redis.StringCmd | |||
| Ping() *redis.StatusCmd | |||
| } | |||
| // RedisQueue redis queue | |||
| type RedisQueue struct { | |||
| client redisClient | |||
| queueName string | |||
| indexer Indexer | |||
| batchNumber int | |||
| } | |||
| func parseConnStr(connStr string) (addrs, password string, dbIdx int, err error) { | |||
| fields := strings.Fields(connStr) | |||
| for _, f := range fields { | |||
| items := strings.SplitN(f, "=", 2) | |||
| if len(items) < 2 { | |||
| continue | |||
| } | |||
| switch strings.ToLower(items[0]) { | |||
| case "addrs": | |||
| addrs = items[1] | |||
| case "password": | |||
| password = items[1] | |||
| case "db": | |||
| dbIdx, err = strconv.Atoi(items[1]) | |||
| if err != nil { | |||
| return | |||
| } | |||
| } | |||
| } | |||
| return | |||
| } | |||
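As a worked illustration of the connection-string format `parseConnStr` accepts — space-separated `key=value` fields, with `addrs` optionally carrying a comma-separated address list — here is a self-contained sketch mirroring the helper above (the `main` wrapper and sample values are only for demonstration):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseConnStr mirrors the helper above: space-separated key=value pairs.
func parseConnStr(connStr string) (addrs, password string, dbIdx int, err error) {
	for _, f := range strings.Fields(connStr) {
		items := strings.SplitN(f, "=", 2)
		if len(items) < 2 {
			continue
		}
		switch strings.ToLower(items[0]) {
		case "addrs":
			addrs = items[1]
		case "password":
			password = items[1]
		case "db":
			if dbIdx, err = strconv.Atoi(items[1]); err != nil {
				return
			}
		}
	}
	return
}

func main() {
	addrs, pass, db, err := parseConnStr("addrs=127.0.0.1:6379,127.0.0.1:6380 password=secret db=2")
	fmt.Println(addrs, pass, db, err) // 127.0.0.1:6379,127.0.0.1:6380 secret 2 <nil>
}
```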
| // NewRedisQueue creates a queue backed by a single redis server or a redis cluster | |||
| func NewRedisQueue(addrs string, password string, dbIdx int, indexer Indexer, batchNumber int) (*RedisQueue, error) { | |||
| dbs := strings.Split(addrs, ",") | |||
| var queue = RedisQueue{ | |||
| queueName: "issue_indexer_queue", | |||
| indexer: indexer, | |||
| batchNumber: batchNumber, | |||
| } | |||
| if len(dbs) == 0 { | |||
| return nil, errors.New("no redis host found") | |||
| } else if len(dbs) == 1 { | |||
| queue.client = redis.NewClient(&redis.Options{ | |||
| Addr: strings.TrimSpace(dbs[0]), // the single configured address | |||
| Password: password, // empty when no password is configured | |||
| DB: dbIdx, // configured database index | |||
| }) | |||
| } else { | |||
| queue.client = redis.NewClusterClient(&redis.ClusterOptions{ | |||
| Addrs: dbs, | |||
| }) | |||
| } | |||
| if err := queue.client.Ping().Err(); err != nil { | |||
| return nil, err | |||
| } | |||
| return &queue, nil | |||
| } | |||
| // Run runs the redis queue | |||
| func (r *RedisQueue) Run() error { | |||
| var i int | |||
| var datas = make([]*IndexerData, 0, r.batchNumber) | |||
| for { | |||
| bs, err := r.client.LPop(r.queueName).Bytes() | |||
| if err != nil && err != redis.Nil { | |||
| log.Error("LPop faile: %v", err) | |||
| time.Sleep(time.Millisecond * 100) | |||
| continue | |||
| } | |||
| i++ | |||
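| // flush the batch once it is full, or after a few poll cycles with pending items | |||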
| if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) { | |||
| r.indexer.Index(datas) | |||
| datas = make([]*IndexerData, 0, r.batchNumber) | |||
| i = 0 | |||
| } | |||
| if len(bs) <= 0 { | |||
| time.Sleep(time.Millisecond * 100) | |||
| continue | |||
| } | |||
| var data IndexerData | |||
| err = json.Unmarshal(bs, &data) | |||
| if err != nil { | |||
| log.Error("Unmarshal: %v", err) | |||
| time.Sleep(time.Millisecond * 100) | |||
| continue | |||
| } | |||
| log.Trace("RedisQueue: task found: %#v", data) | |||
| if data.IsDelete { | |||
| if data.ID > 0 { | |||
| if err = r.indexer.Delete(data.ID); err != nil { | |||
| log.Error("indexer.Delete: %v", err) | |||
| } | |||
| } else if len(data.IDs) > 0 { | |||
| if err = r.indexer.Delete(data.IDs...); err != nil { | |||
| log.Error("indexer.Delete: %v", err) | |||
| } | |||
| } | |||
| time.Sleep(time.Millisecond * 100) | |||
| continue | |||
| } | |||
| datas = append(datas, &data) | |||
| time.Sleep(time.Millisecond * 100) | |||
| } | |||
| } | |||
| // Push implements Queue | |||
| func (r *RedisQueue) Push(data *IndexerData) error { | |||
| bs, err := json.Marshal(data) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return r.client.RPush(r.queueName, bs).Err() | |||
| } | |||
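In terms of wire format, each `Push` RPUSHes one JSON-encoded `IndexerData` onto the `issue_indexer_queue` list, and `Run` LPOPs it back off. A hedged sketch of one produce/consume round trip, using a trimmed stand-in for `IndexerData` (the JSON tags are assumptions) and assuming a local Redis at `127.0.0.1:6379`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-redis/redis"
)

// indexerData is a trimmed, hypothetical stand-in for the IndexerData
// struct pushed by the queue; field tags are assumptions.
type indexerData struct {
	ID       int64   `json:"id"`
	RepoID   int64   `json:"repo_id"`
	Title    string  `json:"title"`
	IsDelete bool    `json:"is_delete"`
	IDs      []int64 `json:"ids"`
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})

	// Producer side: what RedisQueue.Push does.
	bs, _ := json.Marshal(&indexerData{ID: 1, RepoID: 2, Title: "demo"})
	if err := client.RPush("issue_indexer_queue", bs).Err(); err != nil {
		panic(err)
	}

	// Consumer side: what one iteration of RedisQueue.Run does.
	raw, err := client.LPop("issue_indexer_queue").Bytes()
	if err != nil {
		panic(err)
	}
	var data indexerData
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", data)
}
```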
| @@ -13,26 +13,29 @@ import ( | |||
| const ( | |||
| LevelQueueType = "levelqueue" | |||
| ChannelQueueType = "channel" | |||
| RedisQueueType = "redis" | |||
| ) | |||
| var ( | |||
| // Indexer settings | |||
| Indexer = struct { | |||
| IssueType string | |||
| IssuePath string | |||
| RepoIndexerEnabled bool | |||
| RepoPath string | |||
| UpdateQueueLength int | |||
| MaxIndexerFileSize int64 | |||
| IssueIndexerQueueType string | |||
| IssueIndexerQueueDir string | |||
| IssueIndexerQueueBatchNumber int | |||
| IssueType string | |||
| IssuePath string | |||
| RepoIndexerEnabled bool | |||
| RepoPath string | |||
| UpdateQueueLength int | |||
| MaxIndexerFileSize int64 | |||
| IssueQueueType string | |||
| IssueQueueDir string | |||
| IssueQueueConnStr string | |||
| IssueQueueBatchNumber int | |||
| }{ | |||
| IssueType: "bleve", | |||
| IssuePath: "indexers/issues.bleve", | |||
| IssueIndexerQueueType: LevelQueueType, | |||
| IssueIndexerQueueDir: "indexers/issues.queue", | |||
| IssueIndexerQueueBatchNumber: 20, | |||
| IssueType: "bleve", | |||
| IssuePath: "indexers/issues.bleve", | |||
| IssueQueueType: LevelQueueType, | |||
| IssueQueueDir: "indexers/issues.queue", | |||
| IssueQueueConnStr: "", | |||
| IssueQueueBatchNumber: 20, | |||
| } | |||
| ) | |||
| @@ -50,7 +53,8 @@ func newIndexerService() { | |||
| } | |||
| Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20) | |||
| Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024) | |||
| Indexer.IssueIndexerQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType) | |||
| Indexer.IssueIndexerQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue")) | |||
| Indexer.IssueIndexerQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20) | |||
| Indexer.IssueQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType) | |||
| Indexer.IssueQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue")) | |||
| Indexer.IssueQueueConnStr = sec.Key("ISSUE_INDEXER_QUEUE_CONN_STR").MustString("") | |||
| Indexer.IssueQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20) | |||
| } | |||
| @@ -0,0 +1,2 @@ | |||
| *.rdb | |||
| testdata/*/ | |||
| @@ -0,0 +1,19 @@ | |||
| sudo: false | |||
| language: go | |||
| services: | |||
| - redis-server | |||
| go: | |||
| - 1.9.x | |||
| - 1.10.x | |||
| - 1.11.x | |||
| - tip | |||
| matrix: | |||
| allow_failures: | |||
| - go: tip | |||
| install: | |||
| - go get github.com/onsi/ginkgo | |||
| - go get github.com/onsi/gomega | |||
| @@ -0,0 +1,25 @@ | |||
| # Changelog | |||
| ## Unreleased | |||
| - Cluster and Ring pipelines process commands for each node in its own goroutine. | |||
| ## 6.14 | |||
| - Added Options.MinIdleConns. | |||
| - Added Options.MaxConnAge. | |||
| - PoolStats.FreeConns is renamed to PoolStats.IdleConns. | |||
| - Add Client.Do to simplify creating custom commands. | |||
| - Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. | |||
| - Lower memory usage. | |||
| ## v6.13 | |||
| - Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards. | |||
| - Cluster client was optimized to use much less memory when reloading cluster state. | |||
| - PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead. | |||
| - Dialer.KeepAlive is set to 5 minutes by default. | |||
| ## v6.12 | |||
| - ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup | |||
| @@ -0,0 +1,25 @@ | |||
| Copyright (c) 2013 The github.com/go-redis/redis Authors. | |||
| All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,22 @@ | |||
| all: testdeps | |||
| go test ./... | |||
| go test ./... -short -race | |||
| env GOOS=linux GOARCH=386 go test ./... | |||
| go vet | |||
| go get github.com/gordonklaus/ineffassign | |||
| ineffassign . | |||
| testdeps: testdata/redis/src/redis-server | |||
| bench: testdeps | |||
| go test ./... -test.run=NONE -test.bench=. -test.benchmem | |||
| .PHONY: all test testdeps bench | |||
| testdata/redis: | |||
| mkdir -p $@ | |||
| wget -qO- https://github.com/antirez/redis/archive/5.0.tar.gz | tar xvz --strip-components=1 -C $@ | |||
| testdata/redis/src/redis-server: testdata/redis | |||
| sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $</src/Makefile | |||
| cd $< && make all | |||
| @@ -0,0 +1,146 @@ | |||
| # Redis client for Golang | |||
| [Build Status](https://travis-ci.org/go-redis/redis) | |||
| [GoDoc](https://godoc.org/github.com/go-redis/redis) | |||
| [Airbrake](https://airbrake.io) | |||
| Supports: | |||
| - Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC. | |||
| - Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. | |||
| - [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub). | |||
| - [Transactions](https://godoc.org/github.com/go-redis/redis#Multi). | |||
| - [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline). | |||
| - [Scripting](https://godoc.org/github.com/go-redis/redis#Script). | |||
| - [Timeouts](https://godoc.org/github.com/go-redis/redis#Options). | |||
| - [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient). | |||
| - [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient). | |||
| - [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel. | |||
| - [Ring](https://godoc.org/github.com/go-redis/redis#NewRing). | |||
| - [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation). | |||
| - [Cache friendly](https://github.com/go-redis/cache). | |||
| - [Rate limiting](https://github.com/go-redis/redis_rate). | |||
| - [Distributed Locks](https://github.com/bsm/redis-lock). | |||
| API docs: https://godoc.org/github.com/go-redis/redis. | |||
| Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples. | |||
| ## Installation | |||
| Install: | |||
| ```shell | |||
| go get -u github.com/go-redis/redis | |||
| ``` | |||
| Import: | |||
| ```go | |||
| import "github.com/go-redis/redis" | |||
| ``` | |||
| ## Quickstart | |||
| ```go | |||
| func ExampleNewClient() { | |||
| client := redis.NewClient(&redis.Options{ | |||
| Addr: "localhost:6379", | |||
| Password: "", // no password set | |||
| DB: 0, // use default DB | |||
| }) | |||
| pong, err := client.Ping().Result() | |||
| fmt.Println(pong, err) | |||
| // Output: PONG <nil> | |||
| } | |||
| func ExampleClient() { | |||
| err := client.Set("key", "value", 0).Err() | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| val, err := client.Get("key").Result() | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| fmt.Println("key", val) | |||
| val2, err := client.Get("key2").Result() | |||
| if err == redis.Nil { | |||
| fmt.Println("key2 does not exist") | |||
| } else if err != nil { | |||
| panic(err) | |||
| } else { | |||
| fmt.Println("key2", val2) | |||
| } | |||
| // Output: key value | |||
| // key2 does not exist | |||
| } | |||
| ``` | |||
| ## Howto | |||
| Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. | |||
| ## Look and feel | |||
| Some corner cases: | |||
| ```go | |||
| // SET key value EX 10 NX | |||
| set, err := client.SetNX("key", "value", 10*time.Second).Result() | |||
| // SORT list LIMIT 0 2 ASC | |||
| vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() | |||
| // ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 | |||
| vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ | |||
| Min: "-inf", | |||
| Max: "+inf", | |||
| Offset: 0, | |||
| Count: 2, | |||
| }).Result() | |||
| // ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM | |||
| vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() | |||
| // EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" | |||
| vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() | |||
| ``` | |||
| ## Benchmark | |||
| go-redis vs redigo: | |||
| ``` | |||
| BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op | |||
| BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op | |||
| BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op | |||
| BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op | |||
| BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op | |||
| BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op | |||
| BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op | |||
| BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op | |||
| BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op | |||
| BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op | |||
| ``` | |||
| Redis Cluster: | |||
| ``` | |||
| BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op | |||
| BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op | |||
| ``` | |||
| ## See also | |||
| - [Golang PostgreSQL ORM](https://github.com/go-pg/pg) | |||
| - [Golang msgpack](https://github.com/vmihailenco/msgpack) | |||
| - [Golang message task queue](https://github.com/go-msgqueue/msgqueue) | |||
| @@ -0,0 +1,22 @@ | |||
| package redis | |||
| import "sync/atomic" | |||
| func (c *ClusterClient) DBSize() *IntCmd { | |||
| cmd := NewIntCmd("dbsize") | |||
| var size int64 | |||
| err := c.ForEachMaster(func(master *Client) error { | |||
| n, err := master.DBSize().Result() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| atomic.AddInt64(&size, n) | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| cmd.setErr(err) | |||
| return cmd | |||
| } | |||
| cmd.val = size | |||
| return cmd | |||
| } | |||
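`DBSize` here is public API on `ClusterClient`: it fans `DBSIZE` out to every master and sums the results atomically. A short usage sketch (the cluster addresses are placeholders):

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	cluster := redis.NewClusterClient(&redis.ClusterOptions{
		// Placeholder addresses for a local test cluster.
		Addrs: []string{"127.0.0.1:7000", "127.0.0.1:7001", "127.0.0.1:7002"},
	})
	defer cluster.Close()

	// Sums DBSIZE across all master nodes.
	n, err := cluster.DBSize().Result()
	fmt.Println(n, err)
}
```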
| @@ -0,0 +1,4 @@ | |||
| /* | |||
| Package redis implements a Redis client. | |||
| */ | |||
| package redis | |||
| @@ -0,0 +1,81 @@ | |||
| /* | |||
| Copyright 2013 Google Inc. | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| */ | |||
| // Package consistenthash provides an implementation of a ring hash. | |||
| package consistenthash | |||
| import ( | |||
| "hash/crc32" | |||
| "sort" | |||
| "strconv" | |||
| ) | |||
| type Hash func(data []byte) uint32 | |||
| type Map struct { | |||
| hash Hash | |||
| replicas int | |||
| keys []int // Sorted | |||
| hashMap map[int]string | |||
| } | |||
| func New(replicas int, fn Hash) *Map { | |||
| m := &Map{ | |||
| replicas: replicas, | |||
| hash: fn, | |||
| hashMap: make(map[int]string), | |||
| } | |||
| if m.hash == nil { | |||
| m.hash = crc32.ChecksumIEEE | |||
| } | |||
| return m | |||
| } | |||
| // Returns true if there are no items available. | |||
| func (m *Map) IsEmpty() bool { | |||
| return len(m.keys) == 0 | |||
| } | |||
| // Adds some keys to the hash. | |||
| func (m *Map) Add(keys ...string) { | |||
| for _, key := range keys { | |||
| for i := 0; i < m.replicas; i++ { | |||
| hash := int(m.hash([]byte(strconv.Itoa(i) + key))) | |||
| m.keys = append(m.keys, hash) | |||
| m.hashMap[hash] = key | |||
| } | |||
| } | |||
| sort.Ints(m.keys) | |||
| } | |||
| // Gets the closest item in the hash to the provided key. | |||
| func (m *Map) Get(key string) string { | |||
| if m.IsEmpty() { | |||
| return "" | |||
| } | |||
| hash := int(m.hash([]byte(key))) | |||
| // Binary search for appropriate replica. | |||
| idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) | |||
| // Means we have cycled back to the first replica. | |||
| if idx == len(m.keys) { | |||
| idx = 0 | |||
| } | |||
| return m.hashMap[m.keys[idx]] | |||
| } | |||
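This file appears to be a copy of groupcache's `consistenthash` package, vendored as a go-redis internal. Since internal packages cannot be imported directly, the sketch below uses the upstream `github.com/golang/groupcache/consistenthash` (assumed API-identical) to show how keys map stably onto shards:

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/consistenthash"
)

func main() {
	// 100 virtual points per shard smooth out the key distribution;
	// nil selects the default crc32.ChecksumIEEE hash.
	ring := consistenthash.New(100, nil)
	ring.Add("shard-a", "shard-b", "shard-c")

	// Keys map stably to shards; adding a shard later only remaps
	// a fraction of keys instead of reshuffling everything.
	for _, k := range []string{"issue:1", "issue:2", "issue:3"} {
		fmt.Println(k, "->", ring.Get(k))
	}
}
```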
| @@ -0,0 +1,89 @@ | |||
| package internal | |||
| import ( | |||
| "io" | |||
| "net" | |||
| "strings" | |||
| "github.com/go-redis/redis/internal/proto" | |||
| ) | |||
| func IsRetryableError(err error, retryTimeout bool) bool { | |||
| if err == nil { | |||
| return false | |||
| } | |||
| if err == io.EOF { | |||
| return true | |||
| } | |||
| if netErr, ok := err.(net.Error); ok { | |||
| if netErr.Timeout() { | |||
| return retryTimeout | |||
| } | |||
| return true | |||
| } | |||
| s := err.Error() | |||
| if s == "ERR max number of clients reached" { | |||
| return true | |||
| } | |||
| if strings.HasPrefix(s, "LOADING ") { | |||
| return true | |||
| } | |||
| if strings.HasPrefix(s, "READONLY ") { | |||
| return true | |||
| } | |||
| if strings.HasPrefix(s, "CLUSTERDOWN ") { | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
| func IsRedisError(err error) bool { | |||
| _, ok := err.(proto.RedisError) | |||
| return ok | |||
| } | |||
| func IsBadConn(err error, allowTimeout bool) bool { | |||
| if err == nil { | |||
| return false | |||
| } | |||
| if IsRedisError(err) { | |||
| // #790 | |||
| return IsReadOnlyError(err) | |||
| } | |||
| if allowTimeout { | |||
| if netErr, ok := err.(net.Error); ok && netErr.Timeout() { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| func IsMovedError(err error) (moved bool, ask bool, addr string) { | |||
| if !IsRedisError(err) { | |||
| return | |||
| } | |||
| s := err.Error() | |||
| if strings.HasPrefix(s, "MOVED ") { | |||
| moved = true | |||
| } else if strings.HasPrefix(s, "ASK ") { | |||
| ask = true | |||
| } else { | |||
| return | |||
| } | |||
| ind := strings.LastIndex(s, " ") | |||
| if ind == -1 { | |||
| return false, false, "" | |||
| } | |||
| addr = s[ind+1:] | |||
| return | |||
| } | |||
| func IsLoadingError(err error) bool { | |||
| return strings.HasPrefix(err.Error(), "LOADING ") | |||
| } | |||
| func IsReadOnlyError(err error) bool { | |||
| return strings.HasPrefix(err.Error(), "READONLY ") | |||
| } | |||
| @@ -0,0 +1,77 @@ | |||
| package hashtag | |||
| import ( | |||
| "math/rand" | |||
| "strings" | |||
| ) | |||
| const slotNumber = 16384 | |||
| // CRC16 implementation according to CCITT standards. | |||
| // Copyright 2001-2010 Georges Menie (www.menie.org) | |||
| // Copyright 2013 The Go Authors. All rights reserved. | |||
| // http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c | |||
| var crc16tab = [256]uint16{ | |||
| 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, | |||
| 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, | |||
| 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, | |||
| 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, | |||
| 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, | |||
| 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, | |||
| 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, | |||
| 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, | |||
| 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, | |||
| 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, | |||
| 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, | |||
| 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, | |||
| 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, | |||
| 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, | |||
| 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, | |||
| 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, | |||
| 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, | |||
| 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, | |||
| 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, | |||
| 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, | |||
| 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, | |||
| 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, | |||
| 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, | |||
| 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, | |||
| 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, | |||
| 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, | |||
| 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, | |||
| 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, | |||
| 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, | |||
| 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, | |||
| 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, | |||
| 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, | |||
| } | |||
| func Key(key string) string { | |||
| if s := strings.IndexByte(key, '{'); s > -1 { | |||
| if e := strings.IndexByte(key[s+1:], '}'); e > 0 { | |||
| return key[s+1 : s+e+1] | |||
| } | |||
| } | |||
| return key | |||
| } | |||
| func RandomSlot() int { | |||
| return rand.Intn(slotNumber) | |||
| } | |||
| // Slot returns a consistent slot number between 0 and 16383 | |||
| // for any given string key. | |||
| func Slot(key string) int { | |||
| if key == "" { | |||
| return RandomSlot() | |||
| } | |||
| key = Key(key) | |||
| return int(crc16sum(key)) % slotNumber | |||
| } | |||
| func crc16sum(key string) (crc uint16) { | |||
| for i := 0; i < len(key); i++ { | |||
| crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff] | |||
| } | |||
| return | |||
| } | |||
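Redis Cluster routes every key to one of 16384 slots via CRC16, and a `{hash tag}` lets callers force related keys into the same slot. A small sketch of the tag-extraction rule, with `key` mirroring `hashtag.Key` above (the `main` wrapper is only for demonstration):

```go
package main

import (
	"fmt"
	"strings"
)

// key mirrors hashtag.Key above: if the key contains a {hash tag},
// only the tag decides the cluster slot.
func key(k string) string {
	if s := strings.IndexByte(k, '{'); s > -1 {
		if e := strings.IndexByte(k[s+1:], '}'); e > 0 {
			return k[s+1 : s+e+1]
		}
	}
	return k
}

func main() {
	// Both tagged keys hash to the same slot, so multi-key commands
	// across them work in cluster mode.
	fmt.Println(key("{user:1}:profile")) // user:1
	fmt.Println(key("{user:1}:inbox"))   // user:1
	fmt.Println(key("plain-key"))        // plain-key
}
```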
| @@ -0,0 +1,24 @@ | |||
| package internal | |||
| import ( | |||
| "math/rand" | |||
| "time" | |||
| ) | |||
| // Retry backoff with jitter sleep to prevent overloaded conditions during intervals | |||
| // https://www.awsarchitectureblog.com/2015/03/backoff.html | |||
| func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration { | |||
| if retry < 0 { | |||
| retry = 0 | |||
| } | |||
| backoff := minBackoff << uint(retry) | |||
| if backoff > maxBackoff || backoff < minBackoff { | |||
| backoff = maxBackoff | |||
| } | |||
| if backoff == 0 { | |||
| return 0 | |||
| } | |||
| return time.Duration(rand.Int63n(int64(backoff))) | |||
| } | |||
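This is the "full jitter" strategy from the AWS post linked above: the cap grows exponentially with the retry count and the actual sleep is drawn uniformly below it. A standalone sketch (the function is copied locally because `internal` cannot be imported; the 8ms/512ms bounds are arbitrary):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff is a local copy of internal.RetryBackoff: exponential cap,
// full jitter drawn uniformly from [0, cap).
func retryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	if retry < 0 {
		retry = 0
	}
	backoff := minBackoff << uint(retry)
	if backoff > maxBackoff || backoff < minBackoff {
		backoff = maxBackoff
	}
	if backoff == 0 {
		return 0
	}
	return time.Duration(rand.Int63n(int64(backoff)))
}

func main() {
	// The cap doubles per retry (8ms, 16ms, 32ms, ...) until it hits 512ms.
	for retry := 0; retry <= 6; retry++ {
		fmt.Printf("retry %d: sleeping %v\n", retry,
			retryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond))
	}
}
```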
| @@ -0,0 +1,15 @@ | |||
| package internal | |||
| import ( | |||
| "fmt" | |||
| "log" | |||
| ) | |||
| var Logger *log.Logger | |||
| func Logf(s string, args ...interface{}) { | |||
| if Logger == nil { | |||
| return | |||
| } | |||
| Logger.Output(2, fmt.Sprintf(s, args...)) | |||
| } | |||
| @@ -0,0 +1,60 @@ | |||
| /* | |||
| Copyright 2014 The Camlistore Authors | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| */ | |||
| package internal | |||
| import ( | |||
| "sync" | |||
| "sync/atomic" | |||
| ) | |||
| // A Once will perform a successful action exactly once. | |||
| // | |||
| // Unlike a sync.Once, this Once's func returns an error | |||
| // and is re-armed on failure. | |||
| type Once struct { | |||
| m sync.Mutex | |||
| done uint32 | |||
| } | |||
| // Do calls the function f if and only if Do has not been invoked | |||
| // without error for this instance of Once. In other words, given | |||
| // var once Once | |||
| // if once.Do(f) is called multiple times, only the first call will | |||
| // invoke f, even if f has a different value in each invocation unless | |||
| // f returns an error. A new instance of Once is required for each | |||
| // function to execute. | |||
| // | |||
| // Do is intended for initialization that must be run exactly once. Since f | |||
| // is niladic, it may be necessary to use a function literal to capture the | |||
| // arguments to a function to be invoked by Do: | |||
| // err := config.once.Do(func() error { return config.init(filename) }) | |||
| func (o *Once) Do(f func() error) error { | |||
| if atomic.LoadUint32(&o.done) == 1 { | |||
| return nil | |||
| } | |||
| // Slow-path. | |||
| o.m.Lock() | |||
| defer o.m.Unlock() | |||
| var err error | |||
| if o.done == 0 { | |||
| err = f() | |||
| if err == nil { | |||
| atomic.StoreUint32(&o.done, 1) | |||
| } | |||
| } | |||
| return err | |||
| } | |||
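A standalone sketch of the re-armed semantics (the type is copied locally because `internal` cannot be imported): a failing `f` leaves the `Once` unlatched, so the next `Do` retries.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

// once is a local copy of internal.Once: unlike sync.Once, a failed
// f leaves it re-armed so the next Do tries again.
type once struct {
	m    sync.Mutex
	done uint32
}

func (o *once) Do(f func() error) error {
	if atomic.LoadUint32(&o.done) == 1 {
		return nil
	}
	o.m.Lock()
	defer o.m.Unlock()
	var err error
	if o.done == 0 {
		if err = f(); err == nil {
			atomic.StoreUint32(&o.done, 1)
		}
	}
	return err
}

func main() {
	var o once
	calls := 0
	initFn := func() error {
		calls++
		if calls == 1 {
			return errors.New("transient failure")
		}
		return nil
	}
	fmt.Println(o.Do(initFn)) // transient failure (still re-armed)
	fmt.Println(o.Do(initFn)) // <nil> (now latched)
	fmt.Println(o.Do(initFn)) // <nil> (f not called again)
	fmt.Println("calls:", calls) // 2
}
```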
| @@ -0,0 +1,93 @@ | |||
| package pool | |||
| import ( | |||
| "net" | |||
| "sync/atomic" | |||
| "time" | |||
| "github.com/go-redis/redis/internal/proto" | |||
| ) | |||
| var noDeadline = time.Time{} | |||
| type Conn struct { | |||
| netConn net.Conn | |||
| rd *proto.Reader | |||
| rdLocked bool | |||
| wr *proto.Writer | |||
| InitedAt time.Time | |||
| pooled bool | |||
| usedAt atomic.Value | |||
| } | |||
| func NewConn(netConn net.Conn) *Conn { | |||
| cn := &Conn{ | |||
| netConn: netConn, | |||
| } | |||
| cn.rd = proto.NewReader(netConn) | |||
| cn.wr = proto.NewWriter(netConn) | |||
| cn.SetUsedAt(time.Now()) | |||
| return cn | |||
| } | |||
| func (cn *Conn) UsedAt() time.Time { | |||
| return cn.usedAt.Load().(time.Time) | |||
| } | |||
| func (cn *Conn) SetUsedAt(tm time.Time) { | |||
| cn.usedAt.Store(tm) | |||
| } | |||
| func (cn *Conn) SetNetConn(netConn net.Conn) { | |||
| cn.netConn = netConn | |||
| cn.rd.Reset(netConn) | |||
| cn.wr.Reset(netConn) | |||
| } | |||
| func (cn *Conn) setReadTimeout(timeout time.Duration) error { | |||
| now := time.Now() | |||
| cn.SetUsedAt(now) | |||
| if timeout > 0 { | |||
| return cn.netConn.SetReadDeadline(now.Add(timeout)) | |||
| } | |||
| return cn.netConn.SetReadDeadline(noDeadline) | |||
| } | |||
| func (cn *Conn) setWriteTimeout(timeout time.Duration) error { | |||
| now := time.Now() | |||
| cn.SetUsedAt(now) | |||
| if timeout > 0 { | |||
| return cn.netConn.SetWriteDeadline(now.Add(timeout)) | |||
| } | |||
| return cn.netConn.SetWriteDeadline(noDeadline) | |||
| } | |||
| func (cn *Conn) Write(b []byte) (int, error) { | |||
| return cn.netConn.Write(b) | |||
| } | |||
| func (cn *Conn) RemoteAddr() net.Addr { | |||
| return cn.netConn.RemoteAddr() | |||
| } | |||
| func (cn *Conn) WithReader(timeout time.Duration, fn func(rd *proto.Reader) error) error { | |||
| _ = cn.setReadTimeout(timeout) | |||
| return fn(cn.rd) | |||
| } | |||
| func (cn *Conn) WithWriter(timeout time.Duration, fn func(wr *proto.Writer) error) error { | |||
| _ = cn.setWriteTimeout(timeout) | |||
| firstErr := fn(cn.wr) | |||
| err := cn.wr.Flush() | |||
| if err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| return firstErr | |||
| } | |||
| func (cn *Conn) Close() error { | |||
| return cn.netConn.Close() | |||
| } | |||
| @@ -0,0 +1,476 @@ | |||
| package pool | |||
| import ( | |||
| "errors" | |||
| "net" | |||
| "sync" | |||
| "sync/atomic" | |||
| "time" | |||
| "github.com/go-redis/redis/internal" | |||
| ) | |||
| var ErrClosed = errors.New("redis: client is closed") | |||
| var ErrPoolTimeout = errors.New("redis: connection pool timeout") | |||
| var timers = sync.Pool{ | |||
| New: func() interface{} { | |||
| t := time.NewTimer(time.Hour) | |||
| t.Stop() | |||
| return t | |||
| }, | |||
| } | |||
| // Stats contains pool state information and accumulated stats. | |||
| type Stats struct { | |||
| Hits uint32 // number of times free connection was found in the pool | |||
| Misses uint32 // number of times free connection was NOT found in the pool | |||
| Timeouts uint32 // number of times a wait timeout occurred | |||
| TotalConns uint32 // number of total connections in the pool | |||
| IdleConns uint32 // number of idle connections in the pool | |||
| StaleConns uint32 // number of stale connections removed from the pool | |||
| } | |||
| type Pooler interface { | |||
| NewConn() (*Conn, error) | |||
| CloseConn(*Conn) error | |||
| Get() (*Conn, error) | |||
| Put(*Conn) | |||
| Remove(*Conn) | |||
| Len() int | |||
| IdleLen() int | |||
| Stats() *Stats | |||
| Close() error | |||
| } | |||
| type Options struct { | |||
| Dialer func() (net.Conn, error) | |||
| OnClose func(*Conn) error | |||
| PoolSize int | |||
| MinIdleConns int | |||
| MaxConnAge time.Duration | |||
| PoolTimeout time.Duration | |||
| IdleTimeout time.Duration | |||
| IdleCheckFrequency time.Duration | |||
| } | |||
| type ConnPool struct { | |||
| opt *Options | |||
| dialErrorsNum uint32 // atomic | |||
| lastDialErrorMu sync.RWMutex | |||
| lastDialError error | |||
| queue chan struct{} | |||
| connsMu sync.Mutex | |||
| conns []*Conn | |||
| idleConns []*Conn | |||
| poolSize int | |||
| idleConnsLen int | |||
| stats Stats | |||
| _closed uint32 // atomic | |||
| } | |||
| var _ Pooler = (*ConnPool)(nil) | |||
| func NewConnPool(opt *Options) *ConnPool { | |||
| p := &ConnPool{ | |||
| opt: opt, | |||
| queue: make(chan struct{}, opt.PoolSize), | |||
| conns: make([]*Conn, 0, opt.PoolSize), | |||
| idleConns: make([]*Conn, 0, opt.PoolSize), | |||
| } | |||
| for i := 0; i < opt.MinIdleConns; i++ { | |||
| p.checkMinIdleConns() | |||
| } | |||
| if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 { | |||
| go p.reaper(opt.IdleCheckFrequency) | |||
| } | |||
| return p | |||
| } | |||
| func (p *ConnPool) checkMinIdleConns() { | |||
| if p.opt.MinIdleConns == 0 { | |||
| return | |||
| } | |||
| if p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns { | |||
| p.poolSize++ | |||
| p.idleConnsLen++ | |||
| go p.addIdleConn() | |||
| } | |||
| } | |||
| func (p *ConnPool) addIdleConn() { | |||
| cn, err := p.newConn(true) | |||
| if err != nil { | |||
| return | |||
| } | |||
| p.connsMu.Lock() | |||
| p.conns = append(p.conns, cn) | |||
| p.idleConns = append(p.idleConns, cn) | |||
| p.connsMu.Unlock() | |||
| } | |||
| func (p *ConnPool) NewConn() (*Conn, error) { | |||
| return p._NewConn(false) | |||
| } | |||
| func (p *ConnPool) _NewConn(pooled bool) (*Conn, error) { | |||
| cn, err := p.newConn(pooled) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p.connsMu.Lock() | |||
| p.conns = append(p.conns, cn) | |||
| if pooled { | |||
| if p.poolSize < p.opt.PoolSize { | |||
| p.poolSize++ | |||
| } else { | |||
| cn.pooled = false | |||
| } | |||
| } | |||
| p.connsMu.Unlock() | |||
| return cn, nil | |||
| } | |||
| func (p *ConnPool) newConn(pooled bool) (*Conn, error) { | |||
| if p.closed() { | |||
| return nil, ErrClosed | |||
| } | |||
| if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) { | |||
| return nil, p.getLastDialError() | |||
| } | |||
| netConn, err := p.opt.Dialer() | |||
| if err != nil { | |||
| p.setLastDialError(err) | |||
| if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) { | |||
| go p.tryDial() | |||
| } | |||
| return nil, err | |||
| } | |||
| cn := NewConn(netConn) | |||
| cn.pooled = pooled | |||
| return cn, nil | |||
| } | |||
| func (p *ConnPool) tryDial() { | |||
| for { | |||
| if p.closed() { | |||
| return | |||
| } | |||
| conn, err := p.opt.Dialer() | |||
| if err != nil { | |||
| p.setLastDialError(err) | |||
| time.Sleep(time.Second) | |||
| continue | |||
| } | |||
| atomic.StoreUint32(&p.dialErrorsNum, 0) | |||
| _ = conn.Close() | |||
| return | |||
| } | |||
| } | |||
| func (p *ConnPool) setLastDialError(err error) { | |||
| p.lastDialErrorMu.Lock() | |||
| p.lastDialError = err | |||
| p.lastDialErrorMu.Unlock() | |||
| } | |||
| func (p *ConnPool) getLastDialError() error { | |||
| p.lastDialErrorMu.RLock() | |||
| err := p.lastDialError | |||
| p.lastDialErrorMu.RUnlock() | |||
| return err | |||
| } | |||
| // Get returns an existing connection from the pool or creates a new one. | |||
| func (p *ConnPool) Get() (*Conn, error) { | |||
| if p.closed() { | |||
| return nil, ErrClosed | |||
| } | |||
| err := p.waitTurn() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| for { | |||
| p.connsMu.Lock() | |||
| cn := p.popIdle() | |||
| p.connsMu.Unlock() | |||
| if cn == nil { | |||
| break | |||
| } | |||
| if p.isStaleConn(cn) { | |||
| _ = p.CloseConn(cn) | |||
| continue | |||
| } | |||
| atomic.AddUint32(&p.stats.Hits, 1) | |||
| return cn, nil | |||
| } | |||
| atomic.AddUint32(&p.stats.Misses, 1) | |||
| newcn, err := p._NewConn(true) | |||
| if err != nil { | |||
| p.freeTurn() | |||
| return nil, err | |||
| } | |||
| return newcn, nil | |||
| } | |||
| func (p *ConnPool) getTurn() { | |||
| p.queue <- struct{}{} | |||
| } | |||
| func (p *ConnPool) waitTurn() error { | |||
| select { | |||
| case p.queue <- struct{}{}: | |||
| return nil | |||
| default: | |||
| timer := timers.Get().(*time.Timer) | |||
| timer.Reset(p.opt.PoolTimeout) | |||
| select { | |||
| case p.queue <- struct{}{}: | |||
| if !timer.Stop() { | |||
| <-timer.C | |||
| } | |||
| timers.Put(timer) | |||
| return nil | |||
| case <-timer.C: | |||
| timers.Put(timer) | |||
| atomic.AddUint32(&p.stats.Timeouts, 1) | |||
| return ErrPoolTimeout | |||
| } | |||
| } | |||
| } | |||
| func (p *ConnPool) freeTurn() { | |||
| <-p.queue | |||
| } | |||
| func (p *ConnPool) popIdle() *Conn { | |||
| if len(p.idleConns) == 0 { | |||
| return nil | |||
| } | |||
| idx := len(p.idleConns) - 1 | |||
| cn := p.idleConns[idx] | |||
| p.idleConns = p.idleConns[:idx] | |||
| p.idleConnsLen-- | |||
| p.checkMinIdleConns() | |||
| return cn | |||
| } | |||
| func (p *ConnPool) Put(cn *Conn) { | |||
| if !cn.pooled { | |||
| p.Remove(cn) | |||
| return | |||
| } | |||
| p.connsMu.Lock() | |||
| p.idleConns = append(p.idleConns, cn) | |||
| p.idleConnsLen++ | |||
| p.connsMu.Unlock() | |||
| p.freeTurn() | |||
| } | |||
| func (p *ConnPool) Remove(cn *Conn) { | |||
| p.removeConn(cn) | |||
| p.freeTurn() | |||
| _ = p.closeConn(cn) | |||
| } | |||
| func (p *ConnPool) CloseConn(cn *Conn) error { | |||
| p.removeConn(cn) | |||
| return p.closeConn(cn) | |||
| } | |||
| func (p *ConnPool) removeConn(cn *Conn) { | |||
| p.connsMu.Lock() | |||
| for i, c := range p.conns { | |||
| if c == cn { | |||
| p.conns = append(p.conns[:i], p.conns[i+1:]...) | |||
| if cn.pooled { | |||
| p.poolSize-- | |||
| p.checkMinIdleConns() | |||
| } | |||
| break | |||
| } | |||
| } | |||
| p.connsMu.Unlock() | |||
| } | |||
| func (p *ConnPool) closeConn(cn *Conn) error { | |||
| if p.opt.OnClose != nil { | |||
| _ = p.opt.OnClose(cn) | |||
| } | |||
| return cn.Close() | |||
| } | |||
| // Len returns total number of connections. | |||
| func (p *ConnPool) Len() int { | |||
| p.connsMu.Lock() | |||
| n := len(p.conns) | |||
| p.connsMu.Unlock() | |||
| return n | |||
| } | |||
| // IdleLen returns number of idle connections. | |||
| func (p *ConnPool) IdleLen() int { | |||
| p.connsMu.Lock() | |||
| n := p.idleConnsLen | |||
| p.connsMu.Unlock() | |||
| return n | |||
| } | |||
| func (p *ConnPool) Stats() *Stats { | |||
| idleLen := p.IdleLen() | |||
| return &Stats{ | |||
| Hits: atomic.LoadUint32(&p.stats.Hits), | |||
| Misses: atomic.LoadUint32(&p.stats.Misses), | |||
| Timeouts: atomic.LoadUint32(&p.stats.Timeouts), | |||
| TotalConns: uint32(p.Len()), | |||
| IdleConns: uint32(idleLen), | |||
| StaleConns: atomic.LoadUint32(&p.stats.StaleConns), | |||
| } | |||
| } | |||
| func (p *ConnPool) closed() bool { | |||
| return atomic.LoadUint32(&p._closed) == 1 | |||
| } | |||
| func (p *ConnPool) Filter(fn func(*Conn) bool) error { | |||
| var firstErr error | |||
| p.connsMu.Lock() | |||
| for _, cn := range p.conns { | |||
| if fn(cn) { | |||
| if err := p.closeConn(cn); err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| } | |||
| p.connsMu.Unlock() | |||
| return firstErr | |||
| } | |||
| func (p *ConnPool) Close() error { | |||
| if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) { | |||
| return ErrClosed | |||
| } | |||
| var firstErr error | |||
| p.connsMu.Lock() | |||
| for _, cn := range p.conns { | |||
| if err := p.closeConn(cn); err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| p.conns = nil | |||
| p.poolSize = 0 | |||
| p.idleConns = nil | |||
| p.idleConnsLen = 0 | |||
| p.connsMu.Unlock() | |||
| return firstErr | |||
| } | |||
| func (p *ConnPool) reapStaleConn() *Conn { | |||
| if len(p.idleConns) == 0 { | |||
| return nil | |||
| } | |||
| cn := p.idleConns[0] | |||
| if !p.isStaleConn(cn) { | |||
| return nil | |||
| } | |||
| p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...) | |||
| p.idleConnsLen-- | |||
| return cn | |||
| } | |||
| func (p *ConnPool) ReapStaleConns() (int, error) { | |||
| var n int | |||
| for { | |||
| p.getTurn() | |||
| p.connsMu.Lock() | |||
| cn := p.reapStaleConn() | |||
| p.connsMu.Unlock() | |||
| if cn != nil { | |||
| p.removeConn(cn) | |||
| } | |||
| p.freeTurn() | |||
| if cn != nil { | |||
| p.closeConn(cn) | |||
| n++ | |||
| } else { | |||
| break | |||
| } | |||
| } | |||
| return n, nil | |||
| } | |||
| func (p *ConnPool) reaper(frequency time.Duration) { | |||
| ticker := time.NewTicker(frequency) | |||
| defer ticker.Stop() | |||
| for range ticker.C { | |||
| if p.closed() { | |||
| break | |||
| } | |||
| n, err := p.ReapStaleConns() | |||
| if err != nil { | |||
| internal.Logf("ReapStaleConns failed: %s", err) | |||
| continue | |||
| } | |||
| atomic.AddUint32(&p.stats.StaleConns, uint32(n)) | |||
| } | |||
| } | |||
| func (p *ConnPool) isStaleConn(cn *Conn) bool { | |||
| if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 { | |||
| return false | |||
| } | |||
| now := time.Now() | |||
| if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout { | |||
| return true | |||
| } | |||
| if p.opt.MaxConnAge > 0 && now.Sub(cn.InitedAt) >= p.opt.MaxConnAge { | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
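The pool package itself is internal; callers tune it through `redis.Options`, whose fields map onto `pool.Options` above. A hedged configuration sketch (all values are illustrative):

```go
package main

import (
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Addr: "127.0.0.1:6379",

		// These map onto the internal pool.Options above.
		PoolSize:           10,               // max connections (size of the turn queue)
		MinIdleConns:       2,                // kept warm by checkMinIdleConns
		MaxConnAge:         30 * time.Minute, // isStaleConn: retire by age
		PoolTimeout:        4 * time.Second,  // waitTurn: ErrPoolTimeout after this
		IdleTimeout:        5 * time.Minute,  // isStaleConn: retire idle conns
		IdleCheckFrequency: time.Minute,      // reaper tick interval
	})
	defer client.Close()

	_ = client.Ping().Err()
}
```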
| @@ -0,0 +1,53 @@ | |||
| package pool | |||
| type SingleConnPool struct { | |||
| cn *Conn | |||
| } | |||
| var _ Pooler = (*SingleConnPool)(nil) | |||
| func NewSingleConnPool(cn *Conn) *SingleConnPool { | |||
| return &SingleConnPool{ | |||
| cn: cn, | |||
| } | |||
| } | |||
| func (p *SingleConnPool) NewConn() (*Conn, error) { | |||
| panic("not implemented") | |||
| } | |||
| func (p *SingleConnPool) CloseConn(*Conn) error { | |||
| panic("not implemented") | |||
| } | |||
| func (p *SingleConnPool) Get() (*Conn, error) { | |||
| return p.cn, nil | |||
| } | |||
| func (p *SingleConnPool) Put(cn *Conn) { | |||
| if p.cn != cn { | |||
| panic("p.cn != cn") | |||
| } | |||
| } | |||
| func (p *SingleConnPool) Remove(cn *Conn) { | |||
| if p.cn != cn { | |||
| panic("p.cn != cn") | |||
| } | |||
| } | |||
| func (p *SingleConnPool) Len() int { | |||
| return 1 | |||
| } | |||
| func (p *SingleConnPool) IdleLen() int { | |||
| return 0 | |||
| } | |||
| func (p *SingleConnPool) Stats() *Stats { | |||
| return nil | |||
| } | |||
| func (p *SingleConnPool) Close() error { | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,109 @@ | |||
| package pool | |||
| import "sync" | |||
| type StickyConnPool struct { | |||
| pool *ConnPool | |||
| reusable bool | |||
| cn *Conn | |||
| closed bool | |||
| mu sync.Mutex | |||
| } | |||
| var _ Pooler = (*StickyConnPool)(nil) | |||
| func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool { | |||
| return &StickyConnPool{ | |||
| pool: pool, | |||
| reusable: reusable, | |||
| } | |||
| } | |||
| func (p *StickyConnPool) NewConn() (*Conn, error) { | |||
| panic("not implemented") | |||
| } | |||
| func (p *StickyConnPool) CloseConn(*Conn) error { | |||
| panic("not implemented") | |||
| } | |||
| func (p *StickyConnPool) Get() (*Conn, error) { | |||
| p.mu.Lock() | |||
| defer p.mu.Unlock() | |||
| if p.closed { | |||
| return nil, ErrClosed | |||
| } | |||
| if p.cn != nil { | |||
| return p.cn, nil | |||
| } | |||
| cn, err := p.pool.Get() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p.cn = cn | |||
| return cn, nil | |||
| } | |||
| func (p *StickyConnPool) putUpstream() { | |||
| p.pool.Put(p.cn) | |||
| p.cn = nil | |||
| } | |||
| func (p *StickyConnPool) Put(cn *Conn) {} | |||
| func (p *StickyConnPool) removeUpstream() { | |||
| p.pool.Remove(p.cn) | |||
| p.cn = nil | |||
| } | |||
| func (p *StickyConnPool) Remove(cn *Conn) { | |||
| p.removeUpstream() | |||
| } | |||
| func (p *StickyConnPool) Len() int { | |||
| p.mu.Lock() | |||
| defer p.mu.Unlock() | |||
| if p.cn == nil { | |||
| return 0 | |||
| } | |||
| return 1 | |||
| } | |||
| func (p *StickyConnPool) IdleLen() int { | |||
| p.mu.Lock() | |||
| defer p.mu.Unlock() | |||
| if p.cn == nil { | |||
| return 1 | |||
| } | |||
| return 0 | |||
| } | |||
| func (p *StickyConnPool) Stats() *Stats { | |||
| return nil | |||
| } | |||
| func (p *StickyConnPool) Close() error { | |||
| p.mu.Lock() | |||
| defer p.mu.Unlock() | |||
| if p.closed { | |||
| return ErrClosed | |||
| } | |||
| p.closed = true | |||
| if p.cn != nil { | |||
| if p.reusable { | |||
| p.putUpstream() | |||
| } else { | |||
| p.removeUpstream() | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,290 @@ | |||
| package proto | |||
| import ( | |||
| "bufio" | |||
| "fmt" | |||
| "io" | |||
| "strconv" | |||
| "github.com/go-redis/redis/internal/util" | |||
| ) | |||
| const ( | |||
| ErrorReply = '-' | |||
| StatusReply = '+' | |||
| IntReply = ':' | |||
| StringReply = '$' | |||
| ArrayReply = '*' | |||
| ) | |||
| //------------------------------------------------------------------------------ | |||
| const Nil = RedisError("redis: nil") | |||
| type RedisError string | |||
| func (e RedisError) Error() string { return string(e) } | |||
| //------------------------------------------------------------------------------ | |||
| type MultiBulkParse func(*Reader, int64) (interface{}, error) | |||
| type Reader struct { | |||
| rd *bufio.Reader | |||
| _buf []byte | |||
| } | |||
| func NewReader(rd io.Reader) *Reader { | |||
| return &Reader{ | |||
| rd: bufio.NewReader(rd), | |||
| _buf: make([]byte, 64), | |||
| } | |||
| } | |||
| func (r *Reader) Reset(rd io.Reader) { | |||
| r.rd.Reset(rd) | |||
| } | |||
| func (r *Reader) ReadLine() ([]byte, error) { | |||
| line, isPrefix, err := r.rd.ReadLine() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if isPrefix { | |||
| return nil, bufio.ErrBufferFull | |||
| } | |||
| if len(line) == 0 { | |||
| return nil, fmt.Errorf("redis: reply is empty") | |||
| } | |||
| if isNilReply(line) { | |||
| return nil, Nil | |||
| } | |||
| return line, nil | |||
| } | |||
| func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { | |||
| line, err := r.ReadLine() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| switch line[0] { | |||
| case ErrorReply: | |||
| return nil, ParseErrorReply(line) | |||
| case StatusReply: | |||
| return string(line[1:]), nil | |||
| case IntReply: | |||
| return util.ParseInt(line[1:], 10, 64) | |||
| case StringReply: | |||
| return r.readStringReply(line) | |||
| case ArrayReply: | |||
| n, err := parseArrayLen(line) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return m(r, n) | |||
| } | |||
| return nil, fmt.Errorf("redis: can't parse %.100q", line) | |||
| } | |||
| func (r *Reader) ReadIntReply() (int64, error) { | |||
| line, err := r.ReadLine() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| switch line[0] { | |||
| case ErrorReply: | |||
| return 0, ParseErrorReply(line) | |||
| case IntReply: | |||
| return util.ParseInt(line[1:], 10, 64) | |||
| default: | |||
| return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line) | |||
| } | |||
| } | |||
| func (r *Reader) ReadString() (string, error) { | |||
| line, err := r.ReadLine() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| switch line[0] { | |||
| case ErrorReply: | |||
| return "", ParseErrorReply(line) | |||
| case StringReply: | |||
| return r.readStringReply(line) | |||
| case StatusReply: | |||
| return string(line[1:]), nil | |||
| case IntReply: | |||
| return string(line[1:]), nil | |||
| default: | |||
| return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line) | |||
| } | |||
| } | |||
| func (r *Reader) readStringReply(line []byte) (string, error) { | |||
| if isNilReply(line) { | |||
| return "", Nil | |||
| } | |||
| replyLen, err := strconv.Atoi(string(line[1:])) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| b := make([]byte, replyLen+2) | |||
| _, err = io.ReadFull(r.rd, b) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| return util.BytesToString(b[:replyLen]), nil | |||
| } | |||
| func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { | |||
| line, err := r.ReadLine() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| switch line[0] { | |||
| case ErrorReply: | |||
| return nil, ParseErrorReply(line) | |||
| case ArrayReply: | |||
| n, err := parseArrayLen(line) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return m(r, n) | |||
| default: | |||
| return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line) | |||
| } | |||
| } | |||
| func (r *Reader) ReadArrayLen() (int64, error) { | |||
| line, err := r.ReadLine() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| switch line[0] { | |||
| case ErrorReply: | |||
| return 0, ParseErrorReply(line) | |||
| case ArrayReply: | |||
| return parseArrayLen(line) | |||
| default: | |||
| return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line) | |||
| } | |||
| } | |||
| func (r *Reader) ReadScanReply() ([]string, uint64, error) { | |||
| n, err := r.ReadArrayLen() | |||
| if err != nil { | |||
| return nil, 0, err | |||
| } | |||
| if n != 2 { | |||
| return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) | |||
| } | |||
| cursor, err := r.ReadUint() | |||
| if err != nil { | |||
| return nil, 0, err | |||
| } | |||
| n, err = r.ReadArrayLen() | |||
| if err != nil { | |||
| return nil, 0, err | |||
| } | |||
| keys := make([]string, n) | |||
| for i := int64(0); i < n; i++ { | |||
| key, err := r.ReadString() | |||
| if err != nil { | |||
| return nil, 0, err | |||
| } | |||
| keys[i] = key | |||
| } | |||
| return keys, cursor, err | |||
| } | |||
| func (r *Reader) ReadInt() (int64, error) { | |||
| b, err := r.readTmpBytesReply() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| return util.ParseInt(b, 10, 64) | |||
| } | |||
| func (r *Reader) ReadUint() (uint64, error) { | |||
| b, err := r.readTmpBytesReply() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| return util.ParseUint(b, 10, 64) | |||
| } | |||
| func (r *Reader) ReadFloatReply() (float64, error) { | |||
| b, err := r.readTmpBytesReply() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| return util.ParseFloat(b, 64) | |||
| } | |||
| func (r *Reader) readTmpBytesReply() ([]byte, error) { | |||
| line, err := r.ReadLine() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| switch line[0] { | |||
| case ErrorReply: | |||
| return nil, ParseErrorReply(line) | |||
| case StringReply: | |||
| return r._readTmpBytesReply(line) | |||
| case StatusReply: | |||
| return line[1:], nil | |||
| default: | |||
| return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line) | |||
| } | |||
| } | |||
| func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) { | |||
| if isNilReply(line) { | |||
| return nil, Nil | |||
| } | |||
| replyLen, err := strconv.Atoi(string(line[1:])) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| buf := r.buf(replyLen + 2) | |||
| _, err = io.ReadFull(r.rd, buf) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return buf[:replyLen], nil | |||
| } | |||
| func (r *Reader) buf(n int) []byte { | |||
| if d := n - cap(r._buf); d > 0 { | |||
| r._buf = append(r._buf, make([]byte, d)...) | |||
| } | |||
| return r._buf[:n] | |||
| } | |||
| func isNilReply(b []byte) bool { | |||
| return len(b) == 3 && | |||
| (b[0] == StringReply || b[0] == ArrayReply) && | |||
| b[1] == '-' && b[2] == '1' | |||
| } | |||
| func ParseErrorReply(line []byte) error { | |||
| return RedisError(string(line[1:])) | |||
| } | |||
| func parseArrayLen(line []byte) (int64, error) { | |||
| if isNilReply(line) { | |||
| return 0, Nil | |||
| } | |||
| return util.ParseInt(line[1:], 10, 64) | |||
| } | |||
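| // Illustrative sketch, not part of the vendored go-redis source: the Reader | |||
| // above parses RESP replies, where the first byte selects the reply kind: | |||
| // "+" status, "-" error, ":" integer, "$" bulk string, "*" array. | |||
| // NewReader is defined earlier in this file; "strings" is assumed imported. | |||
| func ExampleReader() { | |||
| rd := NewReader(strings.NewReader("$5\r\nhello\r\n")) | |||
| s, err := rd.ReadString() | |||
| fmt.Println(s, err) // Output: hello <nil> | |||
| } | |||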
| @@ -0,0 +1,166 @@ | |||
| package proto | |||
| import ( | |||
| "encoding" | |||
| "fmt" | |||
| "reflect" | |||
| "github.com/go-redis/redis/internal/util" | |||
| ) | |||
| func Scan(b []byte, v interface{}) error { | |||
| switch v := v.(type) { | |||
| case nil: | |||
| return fmt.Errorf("redis: Scan(nil)") | |||
| case *string: | |||
| *v = util.BytesToString(b) | |||
| return nil | |||
| case *[]byte: | |||
| *v = b | |||
| return nil | |||
| case *int: | |||
| var err error | |||
| *v, err = util.Atoi(b) | |||
| return err | |||
| case *int8: | |||
| n, err := util.ParseInt(b, 10, 8) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = int8(n) | |||
| return nil | |||
| case *int16: | |||
| n, err := util.ParseInt(b, 10, 16) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = int16(n) | |||
| return nil | |||
| case *int32: | |||
| n, err := util.ParseInt(b, 10, 32) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = int32(n) | |||
| return nil | |||
| case *int64: | |||
| n, err := util.ParseInt(b, 10, 64) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = n | |||
| return nil | |||
| case *uint: | |||
| n, err := util.ParseUint(b, 10, 64) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = uint(n) | |||
| return nil | |||
| case *uint8: | |||
| n, err := util.ParseUint(b, 10, 8) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = uint8(n) | |||
| return nil | |||
| case *uint16: | |||
| n, err := util.ParseUint(b, 10, 16) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = uint16(n) | |||
| return nil | |||
| case *uint32: | |||
| n, err := util.ParseUint(b, 10, 32) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = uint32(n) | |||
| return nil | |||
| case *uint64: | |||
| n, err := util.ParseUint(b, 10, 64) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = n | |||
| return nil | |||
| case *float32: | |||
| n, err := util.ParseFloat(b, 32) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *v = float32(n) | |||
| return err | |||
| case *float64: | |||
| var err error | |||
| *v, err = util.ParseFloat(b, 64) | |||
| return err | |||
| case *bool: | |||
| *v = len(b) == 1 && b[0] == '1' | |||
| return nil | |||
| case encoding.BinaryUnmarshaler: | |||
| return v.UnmarshalBinary(b) | |||
| default: | |||
| return fmt.Errorf( | |||
| "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v) | |||
| } | |||
| } | |||
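| // Illustrative sketch, not part of the vendored go-redis source: Scan | |||
| // converts raw reply bytes into a Go value via the type switch above. | |||
| func ExampleScan() { | |||
| var n int64 | |||
| if err := Scan([]byte("42"), &n); err != nil { | |||
| panic(err) | |||
| } | |||
| fmt.Println(n) // Output: 42 | |||
| } | |||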
| func ScanSlice(data []string, slice interface{}) error { | |||
| v := reflect.ValueOf(slice) | |||
| if !v.IsValid() { | |||
| return fmt.Errorf("redis: ScanSlice(nil)") | |||
| } | |||
| if v.Kind() != reflect.Ptr { | |||
| return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice) | |||
| } | |||
| v = v.Elem() | |||
| if v.Kind() != reflect.Slice { | |||
| return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice) | |||
| } | |||
| next := makeSliceNextElemFunc(v) | |||
| for i, s := range data { | |||
| elem := next() | |||
| if err := Scan([]byte(s), elem.Addr().Interface()); err != nil { | |||
| err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err) | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value { | |||
| elemType := v.Type().Elem() | |||
| if elemType.Kind() == reflect.Ptr { | |||
| elemType = elemType.Elem() | |||
| return func() reflect.Value { | |||
| if v.Len() < v.Cap() { | |||
| v.Set(v.Slice(0, v.Len()+1)) | |||
| elem := v.Index(v.Len() - 1) | |||
| if elem.IsNil() { | |||
| elem.Set(reflect.New(elemType)) | |||
| } | |||
| return elem.Elem() | |||
| } | |||
| elem := reflect.New(elemType) | |||
| v.Set(reflect.Append(v, elem)) | |||
| return elem.Elem() | |||
| } | |||
| } | |||
| zero := reflect.Zero(elemType) | |||
| return func() reflect.Value { | |||
| if v.Len() < v.Cap() { | |||
| v.Set(v.Slice(0, v.Len()+1)) | |||
| return v.Index(v.Len() - 1) | |||
| } | |||
| v.Set(reflect.Append(v, zero)) | |||
| return v.Index(v.Len() - 1) | |||
| } | |||
| } | |||
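| // Illustrative sketch, not part of the vendored go-redis source: ScanSlice | |||
| // fills a destination slice element by element via makeSliceNextElemFunc. | |||
| func ExampleScanSlice() { | |||
| var nums []int | |||
| if err := ScanSlice([]string{"1", "2", "3"}, &nums); err != nil { | |||
| panic(err) | |||
| } | |||
| fmt.Println(nums) // Output: [1 2 3] | |||
| } | |||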
| @@ -0,0 +1,159 @@ | |||
| package proto | |||
| import ( | |||
| "bufio" | |||
| "encoding" | |||
| "fmt" | |||
| "io" | |||
| "strconv" | |||
| "github.com/go-redis/redis/internal/util" | |||
| ) | |||
| type Writer struct { | |||
| wr *bufio.Writer | |||
| lenBuf []byte | |||
| numBuf []byte | |||
| } | |||
| func NewWriter(wr io.Writer) *Writer { | |||
| return &Writer{ | |||
| wr: bufio.NewWriter(wr), | |||
| lenBuf: make([]byte, 64), | |||
| numBuf: make([]byte, 64), | |||
| } | |||
| } | |||
| func (w *Writer) WriteArgs(args []interface{}) error { | |||
| err := w.wr.WriteByte(ArrayReply) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = w.writeLen(len(args)) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for _, arg := range args { | |||
| err := w.writeArg(arg) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (w *Writer) writeLen(n int) error { | |||
| w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10) | |||
| w.lenBuf = append(w.lenBuf, '\r', '\n') | |||
| _, err := w.wr.Write(w.lenBuf) | |||
| return err | |||
| } | |||
| func (w *Writer) writeArg(v interface{}) error { | |||
| switch v := v.(type) { | |||
| case nil: | |||
| return w.string("") | |||
| case string: | |||
| return w.string(v) | |||
| case []byte: | |||
| return w.bytes(v) | |||
| case int: | |||
| return w.int(int64(v)) | |||
| case int8: | |||
| return w.int(int64(v)) | |||
| case int16: | |||
| return w.int(int64(v)) | |||
| case int32: | |||
| return w.int(int64(v)) | |||
| case int64: | |||
| return w.int(v) | |||
| case uint: | |||
| return w.uint(uint64(v)) | |||
| case uint8: | |||
| return w.uint(uint64(v)) | |||
| case uint16: | |||
| return w.uint(uint64(v)) | |||
| case uint32: | |||
| return w.uint(uint64(v)) | |||
| case uint64: | |||
| return w.uint(v) | |||
| case float32: | |||
| return w.float(float64(v)) | |||
| case float64: | |||
| return w.float(v) | |||
| case bool: | |||
| if v { | |||
| return w.int(1) | |||
| } else { | |||
| return w.int(0) | |||
| } | |||
| case encoding.BinaryMarshaler: | |||
| b, err := v.MarshalBinary() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return w.bytes(b) | |||
| default: | |||
| return fmt.Errorf( | |||
| "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v) | |||
| } | |||
| } | |||
| func (w *Writer) bytes(b []byte) error { | |||
| err := w.wr.WriteByte(StringReply) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = w.writeLen(len(b)) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = w.wr.Write(b) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return w.crlf() | |||
| } | |||
| func (w *Writer) string(s string) error { | |||
| return w.bytes(util.StringToBytes(s)) | |||
| } | |||
| func (w *Writer) uint(n uint64) error { | |||
| w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10) | |||
| return w.bytes(w.numBuf) | |||
| } | |||
| func (w *Writer) int(n int64) error { | |||
| w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10) | |||
| return w.bytes(w.numBuf) | |||
| } | |||
| func (w *Writer) float(f float64) error { | |||
| w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64) | |||
| return w.bytes(w.numBuf) | |||
| } | |||
| func (w *Writer) crlf() error { | |||
| err := w.wr.WriteByte('\r') | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return w.wr.WriteByte('\n') | |||
| } | |||
| func (w *Writer) Reset(wr io.Writer) { | |||
| w.wr.Reset(wr) | |||
| } | |||
| func (w *Writer) Flush() error { | |||
| return w.wr.Flush() | |||
| } | |||
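| // Illustrative sketch, not part of the vendored go-redis source: WriteArgs | |||
| // frames a command as a RESP array of bulk strings ("bytes" assumed imported). | |||
| func ExampleWriter() { | |||
| var buf bytes.Buffer | |||
| w := NewWriter(&buf) | |||
| _ = w.WriteArgs([]interface{}{"SET", "key", "value"}) | |||
| _ = w.Flush() | |||
| fmt.Printf("%q\n", buf.String()) | |||
| // Output: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n" | |||
| } | |||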
| @@ -0,0 +1,29 @@ | |||
| package internal | |||
| import "github.com/go-redis/redis/internal/util" | |||
| func ToLower(s string) string { | |||
| if isLower(s) { | |||
| return s | |||
| } | |||
| b := make([]byte, len(s)) | |||
| for i := range b { | |||
| c := s[i] | |||
| if c >= 'A' && c <= 'Z' { | |||
| c += 'a' - 'A' | |||
| } | |||
| b[i] = c | |||
| } | |||
| return util.BytesToString(b) | |||
| } | |||
| func isLower(s string) bool { | |||
| for i := 0; i < len(s); i++ { | |||
| c := s[i] | |||
| if c >= 'A' && c <= 'Z' { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| @@ -0,0 +1,11 @@ | |||
| // +build appengine | |||
| package util | |||
| func BytesToString(b []byte) string { | |||
| return string(b) | |||
| } | |||
| func StringToBytes(s string) []byte { | |||
| return []byte(s) | |||
| } | |||
| @@ -0,0 +1,19 @@ | |||
| package util | |||
| import "strconv" | |||
| func Atoi(b []byte) (int, error) { | |||
| return strconv.Atoi(BytesToString(b)) | |||
| } | |||
| func ParseInt(b []byte, base int, bitSize int) (int64, error) { | |||
| return strconv.ParseInt(BytesToString(b), base, bitSize) | |||
| } | |||
| func ParseUint(b []byte, base int, bitSize int) (uint64, error) { | |||
| return strconv.ParseUint(BytesToString(b), base, bitSize) | |||
| } | |||
| func ParseFloat(b []byte, bitSize int) (float64, error) { | |||
| return strconv.ParseFloat(BytesToString(b), bitSize) | |||
| } | |||
| @@ -0,0 +1,22 @@ | |||
| // +build !appengine | |||
| package util | |||
| import ( | |||
| "unsafe" | |||
| ) | |||
| // BytesToString converts byte slice to string. | |||
| func BytesToString(b []byte) string { | |||
| return *(*string)(unsafe.Pointer(&b)) | |||
| } | |||
| // StringToBytes converts string to byte slice. | |||
| func StringToBytes(s string) []byte { | |||
| return *(*[]byte)(unsafe.Pointer( | |||
| &struct { | |||
| string | |||
| Cap int | |||
| }{s, len(s)}, | |||
| )) | |||
| } | |||
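| // Illustrative note, not part of the vendored go-redis source: these | |||
| // zero-copy conversions alias the source memory, so the result must not be | |||
| // mutated and must not outlive its source. For example: | |||
| // b := []byte("abc") | |||
| // s := BytesToString(b) // s shares b's backing array | |||
| // b[0] = 'x' // s silently becomes "xbc" | |||
| // The appengine build above falls back to ordinary copying conversions. | |||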
| @@ -0,0 +1,73 @@ | |||
| package redis | |||
| import "sync" | |||
| // ScanIterator is used to incrementally iterate over a collection of elements. | |||
| // It's safe for concurrent use by multiple goroutines. | |||
| type ScanIterator struct { | |||
| mu sync.Mutex // protects cmd and pos | |||
| cmd *ScanCmd | |||
| pos int | |||
| } | |||
| // Err returns the last iterator error, if any. | |||
| func (it *ScanIterator) Err() error { | |||
| it.mu.Lock() | |||
| err := it.cmd.Err() | |||
| it.mu.Unlock() | |||
| return err | |||
| } | |||
| // Next advances the cursor and returns true if more values can be read. | |||
| func (it *ScanIterator) Next() bool { | |||
| it.mu.Lock() | |||
| defer it.mu.Unlock() | |||
| // Return immediately on errors. | |||
| if it.cmd.Err() != nil { | |||
| return false | |||
| } | |||
| // Advance cursor, check if we are still within range. | |||
| if it.pos < len(it.cmd.page) { | |||
| it.pos++ | |||
| return true | |||
| } | |||
| for { | |||
| // Return if there is no more data to fetch. | |||
| if it.cmd.cursor == 0 { | |||
| return false | |||
| } | |||
| // Fetch next page. | |||
| if it.cmd._args[0] == "scan" { | |||
| it.cmd._args[1] = it.cmd.cursor | |||
| } else { | |||
| it.cmd._args[2] = it.cmd.cursor | |||
| } | |||
| err := it.cmd.process(it.cmd) | |||
| if err != nil { | |||
| return false | |||
| } | |||
| it.pos = 1 | |||
| // Redis can occasionally return an empty page. | |||
| if len(it.cmd.page) > 0 { | |||
| return true | |||
| } | |||
| } | |||
| } | |||
| // Val returns the key/field at the current cursor position. | |||
| func (it *ScanIterator) Val() string { | |||
| var v string | |||
| it.mu.Lock() | |||
| if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) { | |||
| v = it.cmd.page[it.pos-1] | |||
| } | |||
| it.mu.Unlock() | |||
| return v | |||
| } | |||
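| // Illustrative sketch, not part of the vendored go-redis source: iterating | |||
| // keys with SCAN, assuming a reachable server. ScanCmd.Iterator (defined in | |||
| // command.go) constructs this iterator. | |||
| func exampleScanIterator(client *Client) error { | |||
| iter := client.Scan(0, "prefix:*", 0).Iterator() | |||
| for iter.Next() { | |||
| _ = iter.Val() // process one key per iteration | |||
| } | |||
| return iter.Err() | |||
| } | |||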
| @@ -0,0 +1,226 @@ | |||
| package redis | |||
| import ( | |||
| "crypto/tls" | |||
| "errors" | |||
| "fmt" | |||
| "net" | |||
| "net/url" | |||
| "runtime" | |||
| "strconv" | |||
| "strings" | |||
| "time" | |||
| "github.com/go-redis/redis/internal/pool" | |||
| ) | |||
| // Limiter is the interface of a rate limiter or a circuit breaker. | |||
| type Limiter interface { | |||
| // Allow returns nil if the operation is allowed, or an error otherwise. | |||
| // If the operation is allowed, the client must report the result of the | |||
| // operation, whether it is a success or a failure. | |||
| Allow() error | |||
| // ReportResult reports the result of a previously allowed operation. | |||
| // nil indicates a success; a non-nil error indicates a failure. | |||
| ReportResult(result error) | |||
| } | |||
| type Options struct { | |||
| // The network type, either tcp or unix. | |||
| // Default is tcp. | |||
| Network string | |||
| // host:port address. | |||
| Addr string | |||
| // Dialer creates a new network connection and has priority over the | |||
| // Network and Addr options. | |||
| Dialer func() (net.Conn, error) | |||
| // Hook that is called when a new connection is established. | |||
| OnConnect func(*Conn) error | |||
| // Optional password. Must match the password specified in the | |||
| // requirepass server configuration option. | |||
| Password string | |||
| // Database to be selected after connecting to the server. | |||
| DB int | |||
| // Maximum number of retries before giving up. | |||
| // Default is to not retry failed commands. | |||
| MaxRetries int | |||
| // Minimum backoff between each retry. | |||
| // Default is 8 milliseconds; -1 disables backoff. | |||
| MinRetryBackoff time.Duration | |||
| // Maximum backoff between each retry. | |||
| // Default is 512 milliseconds; -1 disables backoff. | |||
| MaxRetryBackoff time.Duration | |||
| // Dial timeout for establishing new connections. | |||
| // Default is 5 seconds. | |||
| DialTimeout time.Duration | |||
| // Timeout for socket reads. If reached, commands will fail | |||
| // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default. | |||
| // Default is 3 seconds. | |||
| ReadTimeout time.Duration | |||
| // Timeout for socket writes. If reached, commands will fail | |||
| // with a timeout instead of blocking. | |||
| // Default is ReadTimeout. | |||
| WriteTimeout time.Duration | |||
| // Maximum number of socket connections. | |||
| // Default is 10 connections per CPU as reported by runtime.NumCPU. | |||
| PoolSize int | |||
| // Minimum number of idle connections, which is useful when establishing | |||
| // a new connection is slow. | |||
| MinIdleConns int | |||
| // Connection age at which the client retires (closes) the connection. | |||
| // Default is to not close aged connections. | |||
| MaxConnAge time.Duration | |||
| // Amount of time the client waits for a connection if all connections | |||
| // are busy before returning an error. | |||
| // Default is ReadTimeout + 1 second. | |||
| PoolTimeout time.Duration | |||
| // Amount of time after which the client closes idle connections. | |||
| // Should be less than server's timeout. | |||
| // Default is 5 minutes. -1 disables idle timeout check. | |||
| IdleTimeout time.Duration | |||
| // Frequency of idle checks made by the idle connections reaper. | |||
| // Default is 1 minute. -1 disables the idle connections reaper, | |||
| // but idle connections are still discarded by the client | |||
| // if IdleTimeout is set. | |||
| IdleCheckFrequency time.Duration | |||
| // Enables read-only queries on slave nodes. | |||
| readOnly bool | |||
| // TLS config to use. When set, TLS will be negotiated. | |||
| TLSConfig *tls.Config | |||
| } | |||
| func (opt *Options) init() { | |||
| if opt.Network == "" { | |||
| opt.Network = "tcp" | |||
| } | |||
| if opt.Addr == "" { | |||
| opt.Addr = "localhost:6379" | |||
| } | |||
| if opt.Dialer == nil { | |||
| opt.Dialer = func() (net.Conn, error) { | |||
| netDialer := &net.Dialer{ | |||
| Timeout: opt.DialTimeout, | |||
| KeepAlive: 5 * time.Minute, | |||
| } | |||
| if opt.TLSConfig == nil { | |||
| return netDialer.Dial(opt.Network, opt.Addr) | |||
| } else { | |||
| return tls.DialWithDialer(netDialer, opt.Network, opt.Addr, opt.TLSConfig) | |||
| } | |||
| } | |||
| } | |||
| if opt.PoolSize == 0 { | |||
| opt.PoolSize = 10 * runtime.NumCPU() | |||
| } | |||
| if opt.DialTimeout == 0 { | |||
| opt.DialTimeout = 5 * time.Second | |||
| } | |||
| switch opt.ReadTimeout { | |||
| case -1: | |||
| opt.ReadTimeout = 0 | |||
| case 0: | |||
| opt.ReadTimeout = 3 * time.Second | |||
| } | |||
| switch opt.WriteTimeout { | |||
| case -1: | |||
| opt.WriteTimeout = 0 | |||
| case 0: | |||
| opt.WriteTimeout = opt.ReadTimeout | |||
| } | |||
| if opt.PoolTimeout == 0 { | |||
| opt.PoolTimeout = opt.ReadTimeout + time.Second | |||
| } | |||
| if opt.IdleTimeout == 0 { | |||
| opt.IdleTimeout = 5 * time.Minute | |||
| } | |||
| if opt.IdleCheckFrequency == 0 { | |||
| opt.IdleCheckFrequency = time.Minute | |||
| } | |||
| switch opt.MinRetryBackoff { | |||
| case -1: | |||
| opt.MinRetryBackoff = 0 | |||
| case 0: | |||
| opt.MinRetryBackoff = 8 * time.Millisecond | |||
| } | |||
| switch opt.MaxRetryBackoff { | |||
| case -1: | |||
| opt.MaxRetryBackoff = 0 | |||
| case 0: | |||
| opt.MaxRetryBackoff = 512 * time.Millisecond | |||
| } | |||
| } | |||
| // ParseURL parses a URL into Options that can be used to connect to Redis. | |||
| func ParseURL(redisURL string) (*Options, error) { | |||
| o := &Options{Network: "tcp"} | |||
| u, err := url.Parse(redisURL) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if u.Scheme != "redis" && u.Scheme != "rediss" { | |||
| return nil, errors.New("invalid redis URL scheme: " + u.Scheme) | |||
| } | |||
| if u.User != nil { | |||
| if p, ok := u.User.Password(); ok { | |||
| o.Password = p | |||
| } | |||
| } | |||
| if len(u.Query()) > 0 { | |||
| return nil, errors.New("no options supported") | |||
| } | |||
| h, p, err := net.SplitHostPort(u.Host) | |||
| if err != nil { | |||
| h = u.Host | |||
| } | |||
| if h == "" { | |||
| h = "localhost" | |||
| } | |||
| if p == "" { | |||
| p = "6379" | |||
| } | |||
| o.Addr = net.JoinHostPort(h, p) | |||
| f := strings.FieldsFunc(u.Path, func(r rune) bool { | |||
| return r == '/' | |||
| }) | |||
| switch len(f) { | |||
| case 0: | |||
| o.DB = 0 | |||
| case 1: | |||
| if o.DB, err = strconv.Atoi(f[0]); err != nil { | |||
| return nil, fmt.Errorf("invalid redis database number: %q", f[0]) | |||
| } | |||
| default: | |||
| return nil, errors.New("invalid redis URL path: " + u.Path) | |||
| } | |||
| if u.Scheme == "rediss" { | |||
| o.TLSConfig = &tls.Config{ServerName: h} | |||
| } | |||
| return o, nil | |||
| } | |||
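| // Illustrative sketch, not part of the vendored go-redis source: building a | |||
| // client from a URL; NewClient is defined in redis.go later in this diff. | |||
| func exampleParseURL() (*Client, error) { | |||
| opt, err := ParseURL("redis://:secret@localhost:6379/2") | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| // opt.Addr == "localhost:6379", opt.Password == "secret", opt.DB == 2 | |||
| return NewClient(opt), nil | |||
| } | |||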
| func newConnPool(opt *Options) *pool.ConnPool { | |||
| return pool.NewConnPool(&pool.Options{ | |||
| Dialer: opt.Dialer, | |||
| PoolSize: opt.PoolSize, | |||
| MinIdleConns: opt.MinIdleConns, | |||
| MaxConnAge: opt.MaxConnAge, | |||
| PoolTimeout: opt.PoolTimeout, | |||
| IdleTimeout: opt.IdleTimeout, | |||
| IdleCheckFrequency: opt.IdleCheckFrequency, | |||
| }) | |||
| } | |||
| @@ -0,0 +1,120 @@ | |||
| package redis | |||
| import ( | |||
| "sync" | |||
| "github.com/go-redis/redis/internal/pool" | |||
| ) | |||
| type pipelineExecer func([]Cmder) error | |||
| type Pipeliner interface { | |||
| StatefulCmdable | |||
| Do(args ...interface{}) *Cmd | |||
| Process(cmd Cmder) error | |||
| Close() error | |||
| Discard() error | |||
| Exec() ([]Cmder, error) | |||
| } | |||
| var _ Pipeliner = (*Pipeline)(nil) | |||
| // Pipeline implements pipelining as described in | |||
| // http://redis.io/topics/pipelining. It's safe for concurrent use | |||
| // by multiple goroutines. | |||
| type Pipeline struct { | |||
| statefulCmdable | |||
| exec pipelineExecer | |||
| mu sync.Mutex | |||
| cmds []Cmder | |||
| closed bool | |||
| } | |||
| func (c *Pipeline) Do(args ...interface{}) *Cmd { | |||
| cmd := NewCmd(args...) | |||
| _ = c.Process(cmd) | |||
| return cmd | |||
| } | |||
| // Process queues the cmd for later execution. | |||
| func (c *Pipeline) Process(cmd Cmder) error { | |||
| c.mu.Lock() | |||
| c.cmds = append(c.cmds, cmd) | |||
| c.mu.Unlock() | |||
| return nil | |||
| } | |||
| // Close closes the pipeline, releasing any open resources. | |||
| func (c *Pipeline) Close() error { | |||
| c.mu.Lock() | |||
| c.discard() | |||
| c.closed = true | |||
| c.mu.Unlock() | |||
| return nil | |||
| } | |||
| // Discard resets the pipeline and discards queued commands. | |||
| func (c *Pipeline) Discard() error { | |||
| c.mu.Lock() | |||
| err := c.discard() | |||
| c.mu.Unlock() | |||
| return err | |||
| } | |||
| func (c *Pipeline) discard() error { | |||
| if c.closed { | |||
| return pool.ErrClosed | |||
| } | |||
| c.cmds = c.cmds[:0] | |||
| return nil | |||
| } | |||
| // Exec executes all previously queued commands using one | |||
| // client-server roundtrip. | |||
| // | |||
| // Exec always returns the list of commands and the error of the first | |||
| // failed command, if any. | |||
| func (c *Pipeline) Exec() ([]Cmder, error) { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| if c.closed { | |||
| return nil, pool.ErrClosed | |||
| } | |||
| if len(c.cmds) == 0 { | |||
| return nil, nil | |||
| } | |||
| cmds := c.cmds | |||
| c.cmds = nil | |||
| return cmds, c.exec(cmds) | |||
| } | |||
| func (c *Pipeline) pipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| if err := fn(c); err != nil { | |||
| return nil, err | |||
| } | |||
| cmds, err := c.Exec() | |||
| _ = c.Close() | |||
| return cmds, err | |||
| } | |||
| func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.pipelined(fn) | |||
| } | |||
| func (c *Pipeline) Pipeline() Pipeliner { | |||
| return c | |||
| } | |||
| func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.pipelined(fn) | |||
| } | |||
| func (c *Pipeline) TxPipeline() Pipeliner { | |||
| return c | |||
| } | |||
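| // Illustrative sketch, not part of the vendored go-redis source: queueing | |||
| // commands and sending them in one round trip; Incr and Set come from the | |||
| // cmdable mixin. | |||
| func examplePipeline(client *Client) error { | |||
| var incr *IntCmd | |||
| _, err := client.Pipelined(func(pipe Pipeliner) error { | |||
| incr = pipe.Incr("pipeline_counter") | |||
| pipe.Set("pipeline_flag", "on", 0) | |||
| return nil | |||
| }) | |||
| _ = incr // incr.Val() is populated only after Exec has run | |||
| return err | |||
| } | |||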
| @@ -0,0 +1,473 @@ | |||
| package redis | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| "github.com/go-redis/redis/internal" | |||
| "github.com/go-redis/redis/internal/pool" | |||
| "github.com/go-redis/redis/internal/proto" | |||
| ) | |||
| var errPingTimeout = errors.New("redis: ping timeout") | |||
| // PubSub implements Pub/Sub commands as described in | |||
| // http://redis.io/topics/pubsub. Message receiving is NOT safe | |||
| // for concurrent use by multiple goroutines. | |||
| // | |||
| // PubSub automatically reconnects to Redis Server and resubscribes | |||
| // to the channels in case of network errors. | |||
| type PubSub struct { | |||
| opt *Options | |||
| newConn func([]string) (*pool.Conn, error) | |||
| closeConn func(*pool.Conn) error | |||
| mu sync.Mutex | |||
| cn *pool.Conn | |||
| channels map[string]struct{} | |||
| patterns map[string]struct{} | |||
| closed bool | |||
| exit chan struct{} | |||
| cmd *Cmd | |||
| chOnce sync.Once | |||
| ch chan *Message | |||
| ping chan struct{} | |||
| } | |||
| func (c *PubSub) init() { | |||
| c.exit = make(chan struct{}) | |||
| } | |||
| func (c *PubSub) conn() (*pool.Conn, error) { | |||
| c.mu.Lock() | |||
| cn, err := c._conn(nil) | |||
| c.mu.Unlock() | |||
| return cn, err | |||
| } | |||
| func (c *PubSub) _conn(newChannels []string) (*pool.Conn, error) { | |||
| if c.closed { | |||
| return nil, pool.ErrClosed | |||
| } | |||
| if c.cn != nil { | |||
| return c.cn, nil | |||
| } | |||
| channels := mapKeys(c.channels) | |||
| channels = append(channels, newChannels...) | |||
| cn, err := c.newConn(channels) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if err := c.resubscribe(cn); err != nil { | |||
| _ = c.closeConn(cn) | |||
| return nil, err | |||
| } | |||
| c.cn = cn | |||
| return cn, nil | |||
| } | |||
| func (c *PubSub) writeCmd(cn *pool.Conn, cmd Cmder) error { | |||
| return cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { | |||
| return writeCmd(wr, cmd) | |||
| }) | |||
| } | |||
| func (c *PubSub) resubscribe(cn *pool.Conn) error { | |||
| var firstErr error | |||
| if len(c.channels) > 0 { | |||
| err := c._subscribe(cn, "subscribe", mapKeys(c.channels)) | |||
| if err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| if len(c.patterns) > 0 { | |||
| err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns)) | |||
| if err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| return firstErr | |||
| } | |||
| func mapKeys(m map[string]struct{}) []string { | |||
| s := make([]string, len(m)) | |||
| i := 0 | |||
| for k := range m { | |||
| s[i] = k | |||
| i++ | |||
| } | |||
| return s | |||
| } | |||
| func (c *PubSub) _subscribe( | |||
| cn *pool.Conn, redisCmd string, channels []string, | |||
| ) error { | |||
| args := make([]interface{}, 0, 1+len(channels)) | |||
| args = append(args, redisCmd) | |||
| for _, channel := range channels { | |||
| args = append(args, channel) | |||
| } | |||
| cmd := NewSliceCmd(args...) | |||
| return c.writeCmd(cn, cmd) | |||
| } | |||
| func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) { | |||
| c.mu.Lock() | |||
| c._releaseConn(cn, err, allowTimeout) | |||
| c.mu.Unlock() | |||
| } | |||
| func (c *PubSub) _releaseConn(cn *pool.Conn, err error, allowTimeout bool) { | |||
| if c.cn != cn { | |||
| return | |||
| } | |||
| if internal.IsBadConn(err, allowTimeout) { | |||
| c._reconnect(err) | |||
| } | |||
| } | |||
| func (c *PubSub) _reconnect(reason error) { | |||
| _ = c._closeTheCn(reason) | |||
| _, _ = c._conn(nil) | |||
| } | |||
| func (c *PubSub) _closeTheCn(reason error) error { | |||
| if c.cn == nil { | |||
| return nil | |||
| } | |||
| if !c.closed { | |||
| internal.Logf("redis: discarding bad PubSub connection: %s", reason) | |||
| } | |||
| err := c.closeConn(c.cn) | |||
| c.cn = nil | |||
| return err | |||
| } | |||
| func (c *PubSub) Close() error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| if c.closed { | |||
| return pool.ErrClosed | |||
| } | |||
| c.closed = true | |||
| close(c.exit) | |||
| err := c._closeTheCn(pool.ErrClosed) | |||
| return err | |||
| } | |||
| // Subscribe the client to the specified channels. It returns an | |||
| // empty subscription if there are no channels. | |||
| func (c *PubSub) Subscribe(channels ...string) error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| err := c.subscribe("subscribe", channels...) | |||
| if c.channels == nil { | |||
| c.channels = make(map[string]struct{}) | |||
| } | |||
| for _, s := range channels { | |||
| c.channels[s] = struct{}{} | |||
| } | |||
| return err | |||
| } | |||
| // PSubscribe the client to the given patterns. It returns an | |||
| // empty subscription if there are no patterns. | |||
| func (c *PubSub) PSubscribe(patterns ...string) error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| err := c.subscribe("psubscribe", patterns...) | |||
| if c.patterns == nil { | |||
| c.patterns = make(map[string]struct{}) | |||
| } | |||
| for _, s := range patterns { | |||
| c.patterns[s] = struct{}{} | |||
| } | |||
| return err | |||
| } | |||
| // Unsubscribe the client from the given channels, or from all of | |||
| // them if none is given. | |||
| func (c *PubSub) Unsubscribe(channels ...string) error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| for _, channel := range channels { | |||
| delete(c.channels, channel) | |||
| } | |||
| err := c.subscribe("unsubscribe", channels...) | |||
| return err | |||
| } | |||
| // PUnsubscribe the client from the given patterns, or from all of | |||
| // them if none is given. | |||
| func (c *PubSub) PUnsubscribe(patterns ...string) error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| for _, pattern := range patterns { | |||
| delete(c.patterns, pattern) | |||
| } | |||
| err := c.subscribe("punsubscribe", patterns...) | |||
| return err | |||
| } | |||
| func (c *PubSub) subscribe(redisCmd string, channels ...string) error { | |||
| cn, err := c._conn(channels) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = c._subscribe(cn, redisCmd, channels) | |||
| c._releaseConn(cn, err, false) | |||
| return err | |||
| } | |||
| func (c *PubSub) Ping(payload ...string) error { | |||
| args := []interface{}{"ping"} | |||
| if len(payload) == 1 { | |||
| args = append(args, payload[0]) | |||
| } | |||
| cmd := NewCmd(args...) | |||
| cn, err := c.conn() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = c.writeCmd(cn, cmd) | |||
| c.releaseConn(cn, err, false) | |||
| return err | |||
| } | |||
| // Subscription is received after a successful subscription to a channel. | |||
| type Subscription struct { | |||
| // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". | |||
| Kind string | |||
| // Channel name we have subscribed to. | |||
| Channel string | |||
| // Number of channels we are currently subscribed to. | |||
| Count int | |||
| } | |||
| func (m *Subscription) String() string { | |||
| return fmt.Sprintf("%s: %s", m.Kind, m.Channel) | |||
| } | |||
| // Message received as a result of a PUBLISH command issued by another client. | |||
| type Message struct { | |||
| Channel string | |||
| Pattern string | |||
| Payload string | |||
| } | |||
| func (m *Message) String() string { | |||
| return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) | |||
| } | |||
| // Pong received as a result of a PING command issued by another client. | |||
| type Pong struct { | |||
| Payload string | |||
| } | |||
| func (p *Pong) String() string { | |||
| if p.Payload != "" { | |||
| return fmt.Sprintf("Pong<%s>", p.Payload) | |||
| } | |||
| return "Pong" | |||
| } | |||
| func (c *PubSub) newMessage(reply interface{}) (interface{}, error) { | |||
| switch reply := reply.(type) { | |||
| case string: | |||
| return &Pong{ | |||
| Payload: reply, | |||
| }, nil | |||
| case []interface{}: | |||
| switch kind := reply[0].(string); kind { | |||
| case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": | |||
| return &Subscription{ | |||
| Kind: kind, | |||
| Channel: reply[1].(string), | |||
| Count: int(reply[2].(int64)), | |||
| }, nil | |||
| case "message": | |||
| return &Message{ | |||
| Channel: reply[1].(string), | |||
| Payload: reply[2].(string), | |||
| }, nil | |||
| case "pmessage": | |||
| return &Message{ | |||
| Pattern: reply[1].(string), | |||
| Channel: reply[2].(string), | |||
| Payload: reply[3].(string), | |||
| }, nil | |||
| case "pong": | |||
| return &Pong{ | |||
| Payload: reply[1].(string), | |||
| }, nil | |||
| default: | |||
| return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind) | |||
| } | |||
| default: | |||
| return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply) | |||
| } | |||
| } | |||
| // ReceiveTimeout acts like Receive but returns an error if a message | |||
| // is not received in time. This is a low-level API and in most cases | |||
| // Channel should be used instead. | |||
| func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) { | |||
| if c.cmd == nil { | |||
| c.cmd = NewCmd() | |||
| } | |||
| cn, err := c.conn() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = cn.WithReader(timeout, func(rd *proto.Reader) error { | |||
| return c.cmd.readReply(rd) | |||
| }) | |||
| c.releaseConn(cn, err, timeout > 0) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return c.newMessage(c.cmd.Val()) | |||
| } | |||
| // Receive returns a message as a Subscription, Message, Pong or error. | |||
| // See PubSub example for details. This is a low-level API and in most cases | |||
| // Channel should be used instead. | |||
| func (c *PubSub) Receive() (interface{}, error) { | |||
| return c.ReceiveTimeout(0) | |||
| } | |||
| // ReceiveMessage returns a Message or error, ignoring Subscription and Pong | |||
| // messages. This is a low-level API and in most cases Channel should be used | |||
| // instead. | |||
| func (c *PubSub) ReceiveMessage() (*Message, error) { | |||
| for { | |||
| msg, err := c.Receive() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| switch msg := msg.(type) { | |||
| case *Subscription: | |||
| // Ignore. | |||
| case *Pong: | |||
| // Ignore. | |||
| case *Message: | |||
| return msg, nil | |||
| default: | |||
| err := fmt.Errorf("redis: unknown message: %T", msg) | |||
| return nil, err | |||
| } | |||
| } | |||
| } | |||
| // Channel returns a Go channel for concurrently receiving messages. | |||
| // It periodically sends Ping messages to test connection health. | |||
| // The channel is closed together with the PubSub. The Receive* APIs cannot | |||
| // be used after the channel is created. | |||
| func (c *PubSub) Channel() <-chan *Message { | |||
| c.chOnce.Do(c.initChannel) | |||
| return c.ch | |||
| } | |||
| func (c *PubSub) initChannel() { | |||
| c.ch = make(chan *Message, 100) | |||
| c.ping = make(chan struct{}, 10) | |||
| go func() { | |||
| var errCount int | |||
| for { | |||
| msg, err := c.Receive() | |||
| if err != nil { | |||
| if err == pool.ErrClosed { | |||
| close(c.ch) | |||
| return | |||
| } | |||
| if errCount > 0 { | |||
| time.Sleep(c.retryBackoff(errCount)) | |||
| } | |||
| errCount++ | |||
| continue | |||
| } | |||
| errCount = 0 | |||
| // Any message is as good as a ping. | |||
| select { | |||
| case c.ping <- struct{}{}: | |||
| default: | |||
| } | |||
| switch msg := msg.(type) { | |||
| case *Subscription: | |||
| // Ignore. | |||
| case *Pong: | |||
| // Ignore. | |||
| case *Message: | |||
| c.ch <- msg | |||
| default: | |||
| internal.Logf("redis: unknown message: %T", msg) | |||
| } | |||
| } | |||
| }() | |||
| go func() { | |||
| const timeout = 5 * time.Second | |||
| timer := time.NewTimer(timeout) | |||
| timer.Stop() | |||
| healthy := true | |||
| for { | |||
| timer.Reset(timeout) | |||
| select { | |||
| case <-c.ping: | |||
| healthy = true | |||
| if !timer.Stop() { | |||
| <-timer.C | |||
| } | |||
| case <-timer.C: | |||
| pingErr := c.Ping() | |||
| if healthy { | |||
| healthy = false | |||
| } else { | |||
| if pingErr == nil { | |||
| pingErr = errPingTimeout | |||
| } | |||
| c.mu.Lock() | |||
| c._reconnect(pingErr) | |||
| c.mu.Unlock() | |||
| } | |||
| case <-c.exit: | |||
| return | |||
| } | |||
| } | |||
| }() | |||
| } | |||
| func (c *PubSub) retryBackoff(attempt int) time.Duration { | |||
| return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) | |||
| } | |||
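| // Illustrative sketch, not part of the vendored go-redis source: consuming | |||
| // messages through the reconnect-aware channel API above; Subscribe is | |||
| // defined on Client in redis.go later in this diff. | |||
| func examplePubSub(client *Client) { | |||
| sub := client.Subscribe("mychannel") | |||
| defer sub.Close() | |||
| for msg := range sub.Channel() { | |||
| _ = msg.Payload // handle each published message | |||
| } | |||
| } | |||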
| @@ -0,0 +1,580 @@ | |||
| package redis | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "log" | |||
| "os" | |||
| "time" | |||
| "github.com/go-redis/redis/internal" | |||
| "github.com/go-redis/redis/internal/pool" | |||
| "github.com/go-redis/redis/internal/proto" | |||
| ) | |||
| // Nil is the reply Redis returns when the key does not exist. | |||
| const Nil = proto.Nil | |||
| func init() { | |||
| SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)) | |||
| } | |||
| func SetLogger(logger *log.Logger) { | |||
| internal.Logger = logger | |||
| } | |||
| type baseClient struct { | |||
| opt *Options | |||
| connPool pool.Pooler | |||
| limiter Limiter | |||
| process func(Cmder) error | |||
| processPipeline func([]Cmder) error | |||
| processTxPipeline func([]Cmder) error | |||
| onClose func() error // hook called when client is closed | |||
| } | |||
| func (c *baseClient) init() { | |||
| c.process = c.defaultProcess | |||
| c.processPipeline = c.defaultProcessPipeline | |||
| c.processTxPipeline = c.defaultProcessTxPipeline | |||
| } | |||
| func (c *baseClient) String() string { | |||
| return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) | |||
| } | |||
| func (c *baseClient) newConn() (*pool.Conn, error) { | |||
| cn, err := c.connPool.NewConn() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if cn.InitedAt.IsZero() { | |||
| if err := c.initConn(cn); err != nil { | |||
| _ = c.connPool.CloseConn(cn) | |||
| return nil, err | |||
| } | |||
| } | |||
| return cn, nil | |||
| } | |||
| func (c *baseClient) getConn() (*pool.Conn, error) { | |||
| if c.limiter != nil { | |||
| err := c.limiter.Allow() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| cn, err := c._getConn() | |||
| if err != nil { | |||
| if c.limiter != nil { | |||
| c.limiter.ReportResult(err) | |||
| } | |||
| return nil, err | |||
| } | |||
| return cn, nil | |||
| } | |||
| func (c *baseClient) _getConn() (*pool.Conn, error) { | |||
| cn, err := c.connPool.Get() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if cn.InitedAt.IsZero() { | |||
| err := c.initConn(cn) | |||
| if err != nil { | |||
| c.connPool.Remove(cn) | |||
| return nil, err | |||
| } | |||
| } | |||
| return cn, nil | |||
| } | |||
| func (c *baseClient) releaseConn(cn *pool.Conn, err error) { | |||
| if c.limiter != nil { | |||
| c.limiter.ReportResult(err) | |||
| } | |||
| if internal.IsBadConn(err, false) { | |||
| c.connPool.Remove(cn) | |||
| } else { | |||
| c.connPool.Put(cn) | |||
| } | |||
| } | |||
| func (c *baseClient) releaseConnStrict(cn *pool.Conn, err error) { | |||
| if c.limiter != nil { | |||
| c.limiter.ReportResult(err) | |||
| } | |||
| if err == nil || internal.IsRedisError(err) { | |||
| c.connPool.Put(cn) | |||
| } else { | |||
| c.connPool.Remove(cn) | |||
| } | |||
| } | |||
| func (c *baseClient) initConn(cn *pool.Conn) error { | |||
| cn.InitedAt = time.Now() | |||
| if c.opt.Password == "" && | |||
| c.opt.DB == 0 && | |||
| !c.opt.readOnly && | |||
| c.opt.OnConnect == nil { | |||
| return nil | |||
| } | |||
| conn := newConn(c.opt, cn) | |||
| _, err := conn.Pipelined(func(pipe Pipeliner) error { | |||
| if c.opt.Password != "" { | |||
| pipe.Auth(c.opt.Password) | |||
| } | |||
| if c.opt.DB > 0 { | |||
| pipe.Select(c.opt.DB) | |||
| } | |||
| if c.opt.readOnly { | |||
| pipe.ReadOnly() | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if c.opt.OnConnect != nil { | |||
| return c.opt.OnConnect(conn) | |||
| } | |||
| return nil | |||
| } | |||
| // Do creates a Cmd from the args and processes the cmd. | |||
| func (c *baseClient) Do(args ...interface{}) *Cmd { | |||
| cmd := NewCmd(args...) | |||
| _ = c.Process(cmd) | |||
| return cmd | |||
| } | |||
| // WrapProcess wraps function that processes Redis commands. | |||
| func (c *baseClient) WrapProcess( | |||
| fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error, | |||
| ) { | |||
| c.process = fn(c.process) | |||
| } | |||
| func (c *baseClient) Process(cmd Cmder) error { | |||
| return c.process(cmd) | |||
| } | |||
| func (c *baseClient) defaultProcess(cmd Cmder) error { | |||
| for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { | |||
| if attempt > 0 { | |||
| time.Sleep(c.retryBackoff(attempt)) | |||
| } | |||
| cn, err := c.getConn() | |||
| if err != nil { | |||
| cmd.setErr(err) | |||
| if internal.IsRetryableError(err, true) { | |||
| continue | |||
| } | |||
| return err | |||
| } | |||
| err = cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { | |||
| return writeCmd(wr, cmd) | |||
| }) | |||
| if err != nil { | |||
| c.releaseConn(cn, err) | |||
| cmd.setErr(err) | |||
| if internal.IsRetryableError(err, true) { | |||
| continue | |||
| } | |||
| return err | |||
| } | |||
| err = cn.WithReader(c.cmdTimeout(cmd), func(rd *proto.Reader) error { | |||
| return cmd.readReply(rd) | |||
| }) | |||
| c.releaseConn(cn, err) | |||
| if err != nil && internal.IsRetryableError(err, cmd.readTimeout() == nil) { | |||
| continue | |||
| } | |||
| return err | |||
| } | |||
| return cmd.Err() | |||
| } | |||
| func (c *baseClient) retryBackoff(attempt int) time.Duration { | |||
| return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) | |||
| } | |||
| func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { | |||
| if timeout := cmd.readTimeout(); timeout != nil { | |||
| t := *timeout | |||
| if t == 0 { | |||
| return 0 | |||
| } | |||
| return t + 10*time.Second | |||
| } | |||
| return c.opt.ReadTimeout | |||
| } | |||
| // Close closes the client, releasing any open resources. | |||
| // | |||
| // It is rare to Close a Client, as the Client is meant to be | |||
| // long-lived and shared between many goroutines. | |||
| func (c *baseClient) Close() error { | |||
| var firstErr error | |||
| if c.onClose != nil { | |||
| if err := c.onClose(); err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| if err := c.connPool.Close(); err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| return firstErr | |||
| } | |||
| func (c *baseClient) getAddr() string { | |||
| return c.opt.Addr | |||
| } | |||
| func (c *baseClient) WrapProcessPipeline( | |||
| fn func(oldProcess func([]Cmder) error) func([]Cmder) error, | |||
| ) { | |||
| c.processPipeline = fn(c.processPipeline) | |||
| c.processTxPipeline = fn(c.processTxPipeline) | |||
| } | |||
| func (c *baseClient) defaultProcessPipeline(cmds []Cmder) error { | |||
| return c.generalProcessPipeline(cmds, c.pipelineProcessCmds) | |||
| } | |||
| func (c *baseClient) defaultProcessTxPipeline(cmds []Cmder) error { | |||
| return c.generalProcessPipeline(cmds, c.txPipelineProcessCmds) | |||
| } | |||
| type pipelineProcessor func(*pool.Conn, []Cmder) (bool, error) | |||
| func (c *baseClient) generalProcessPipeline(cmds []Cmder, p pipelineProcessor) error { | |||
| for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { | |||
| if attempt > 0 { | |||
| time.Sleep(c.retryBackoff(attempt)) | |||
| } | |||
| cn, err := c.getConn() | |||
| if err != nil { | |||
| setCmdsErr(cmds, err) | |||
| return err | |||
| } | |||
| canRetry, err := p(cn, cmds) | |||
| c.releaseConnStrict(cn, err) | |||
| if !canRetry || !internal.IsRetryableError(err, true) { | |||
| break | |||
| } | |||
| } | |||
| return cmdsFirstErr(cmds) | |||
| } | |||
| func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) { | |||
| err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { | |||
| return writeCmd(wr, cmds...) | |||
| }) | |||
| if err != nil { | |||
| setCmdsErr(cmds, err) | |||
| return true, err | |||
| } | |||
| err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { | |||
| return pipelineReadCmds(rd, cmds) | |||
| }) | |||
| return true, err | |||
| } | |||
| func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { | |||
| for _, cmd := range cmds { | |||
| err := cmd.readReply(rd) | |||
| if err != nil && !internal.IsRedisError(err) { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) { | |||
| err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { | |||
| return txPipelineWriteMulti(wr, cmds) | |||
| }) | |||
| if err != nil { | |||
| setCmdsErr(cmds, err) | |||
| return true, err | |||
| } | |||
| err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { | |||
| err := txPipelineReadQueued(rd, cmds) | |||
| if err != nil { | |||
| setCmdsErr(cmds, err) | |||
| return err | |||
| } | |||
| return pipelineReadCmds(rd, cmds) | |||
| }) | |||
| return false, err | |||
| } | |||
| func txPipelineWriteMulti(wr *proto.Writer, cmds []Cmder) error { | |||
| multiExec := make([]Cmder, 0, len(cmds)+2) | |||
| multiExec = append(multiExec, NewStatusCmd("MULTI")) | |||
| multiExec = append(multiExec, cmds...) | |||
| multiExec = append(multiExec, NewSliceCmd("EXEC")) | |||
| return writeCmd(wr, multiExec...) | |||
| } | |||
| func txPipelineReadQueued(rd *proto.Reader, cmds []Cmder) error { | |||
| // Parse queued replies. | |||
| var statusCmd StatusCmd | |||
| err := statusCmd.readReply(rd) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for range cmds { | |||
| err = statusCmd.readReply(rd) | |||
| if err != nil && !internal.IsRedisError(err) { | |||
| return err | |||
| } | |||
| } | |||
| // Parse number of replies. | |||
| line, err := rd.ReadLine() | |||
| if err != nil { | |||
| if err == Nil { | |||
| err = TxFailedErr | |||
| } | |||
| return err | |||
| } | |||
| switch line[0] { | |||
| case proto.ErrorReply: | |||
| return proto.ParseErrorReply(line) | |||
| case proto.ArrayReply: | |||
| // ok | |||
| default: | |||
| err := fmt.Errorf("redis: expected '*', but got line %q", line) | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
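| // Illustrative note, not part of the vendored go-redis source: for a | |||
| // transactional pipeline of two SETs, the helpers above produce an exchange | |||
| // like: | |||
| // client: MULTI, SET a 1, SET b 2, EXEC | |||
| // server: +OK, +QUEUED, +QUEUED, then *2 followed by the two replies | |||
| // txPipelineReadQueued consumes the +OK, the +QUEUED lines, and the *<n> | |||
| // header; pipelineReadCmds then reads the replies inside the EXEC array. | |||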
| //------------------------------------------------------------------------------ | |||
| // Client is a Redis client representing a pool of zero or more | |||
| // underlying connections. It's safe for concurrent use by multiple | |||
| // goroutines. | |||
| type Client struct { | |||
| baseClient | |||
| cmdable | |||
| ctx context.Context | |||
| } | |||
| // NewClient returns a client to the Redis Server specified by Options. | |||
| func NewClient(opt *Options) *Client { | |||
| opt.init() | |||
| c := Client{ | |||
| baseClient: baseClient{ | |||
| opt: opt, | |||
| connPool: newConnPool(opt), | |||
| }, | |||
| } | |||
| c.baseClient.init() | |||
| c.init() | |||
| return &c | |||
| } | |||
| func (c *Client) init() { | |||
| c.cmdable.setProcessor(c.Process) | |||
| } | |||
| func (c *Client) Context() context.Context { | |||
| if c.ctx != nil { | |||
| return c.ctx | |||
| } | |||
| return context.Background() | |||
| } | |||
| func (c *Client) WithContext(ctx context.Context) *Client { | |||
| if ctx == nil { | |||
| panic("nil context") | |||
| } | |||
| c2 := c.clone() | |||
| c2.ctx = ctx | |||
| return c2 | |||
| } | |||
| func (c *Client) clone() *Client { | |||
| cp := *c | |||
| cp.init() | |||
| return &cp | |||
| } | |||
| // Options returns read-only Options that were used to create the client. | |||
| func (c *Client) Options() *Options { | |||
| return c.opt | |||
| } | |||
| func (c *Client) SetLimiter(l Limiter) *Client { | |||
| c.limiter = l | |||
| return c | |||
| } | |||
| type PoolStats pool.Stats | |||
| // PoolStats returns connection pool stats. | |||
| func (c *Client) PoolStats() *PoolStats { | |||
| stats := c.connPool.Stats() | |||
| return (*PoolStats)(stats) | |||
| } | |||
| func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.Pipeline().Pipelined(fn) | |||
| } | |||
| func (c *Client) Pipeline() Pipeliner { | |||
| pipe := Pipeline{ | |||
| exec: c.processPipeline, | |||
| } | |||
| pipe.statefulCmdable.setProcessor(pipe.Process) | |||
| return &pipe | |||
| } | |||
| func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.TxPipeline().Pipelined(fn) | |||
| } | |||
| // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. | |||
| func (c *Client) TxPipeline() Pipeliner { | |||
| pipe := Pipeline{ | |||
| exec: c.processTxPipeline, | |||
| } | |||
| pipe.statefulCmdable.setProcessor(pipe.Process) | |||
| return &pipe | |||
| } | |||
| func (c *Client) pubSub() *PubSub { | |||
| pubsub := &PubSub{ | |||
| opt: c.opt, | |||
| newConn: func(channels []string) (*pool.Conn, error) { | |||
| return c.newConn() | |||
| }, | |||
| closeConn: c.connPool.CloseConn, | |||
| } | |||
| pubsub.init() | |||
| return pubsub | |||
| } | |||
| // Subscribe subscribes the client to the specified channels. | |||
| // Channels can be omitted to create an empty subscription. | |||
| // Note that this method does not wait on a response from Redis, so the | |||
| // subscription may not be active immediately. To force the connection to wait, | |||
| // you may call the Receive() method on the returned *PubSub like so: | |||
| // | |||
| // sub := client.Subscribe(queryResp) | |||
| // iface, err := sub.Receive() | |||
| // if err != nil { | |||
| // // handle error | |||
| // } | |||
| // | |||
| // // Should be *Subscription, but others are possible if other actions have been | |||
| // // taken on sub since it was created. | |||
| // switch iface.(type) { | |||
| // case *Subscription: | |||
| // // subscribe succeeded | |||
| // case *Message: | |||
| // // received first message | |||
| // case *Pong: | |||
| // // pong received | |||
| // default: | |||
| // // handle error | |||
| // } | |||
| // | |||
| // ch := sub.Channel() | |||
| func (c *Client) Subscribe(channels ...string) *PubSub { | |||
| pubsub := c.pubSub() | |||
| if len(channels) > 0 { | |||
| _ = pubsub.Subscribe(channels...) | |||
| } | |||
| return pubsub | |||
| } | |||
| // PSubscribe subscribes the client to the given patterns. | |||
| // Patterns can be omitted to create an empty subscription. | |||
| func (c *Client) PSubscribe(channels ...string) *PubSub { | |||
| pubsub := c.pubSub() | |||
| if len(channels) > 0 { | |||
| _ = pubsub.PSubscribe(channels...) | |||
| } | |||
| return pubsub | |||
| } | |||
| //------------------------------------------------------------------------------ | |||
| // Conn is like Client, but its pool contains single connection. | |||
| type Conn struct { | |||
| baseClient | |||
| statefulCmdable | |||
| } | |||
| func newConn(opt *Options, cn *pool.Conn) *Conn { | |||
| c := Conn{ | |||
| baseClient: baseClient{ | |||
| opt: opt, | |||
| connPool: pool.NewSingleConnPool(cn), | |||
| }, | |||
| } | |||
| c.baseClient.init() | |||
| c.statefulCmdable.setProcessor(c.Process) | |||
| return &c | |||
| } | |||
| func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.Pipeline().Pipelined(fn) | |||
| } | |||
| func (c *Conn) Pipeline() Pipeliner { | |||
| pipe := Pipeline{ | |||
| exec: c.processPipeline, | |||
| } | |||
| pipe.statefulCmdable.setProcessor(pipe.Process) | |||
| return &pipe | |||
| } | |||
| func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.TxPipeline().Pipelined(fn) | |||
| } | |||
| // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. | |||
| func (c *Conn) TxPipeline() Pipeliner { | |||
| pipe := Pipeline{ | |||
| exec: c.processTxPipeline, | |||
| } | |||
| pipe.statefulCmdable.setProcessor(pipe.Process) | |||
| return &pipe | |||
| } | |||
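| // Illustrative sketch, not part of the vendored go-redis source: creating a | |||
| // client and issuing an ad-hoc command via Do; Result is defined on Cmd in | |||
| // command.go. | |||
| func exampleClient() error { | |||
| client := NewClient(&Options{Addr: "localhost:6379"}) | |||
| defer client.Close() | |||
| v, err := client.Do("SET", "k", "v").Result() | |||
| _ = v // "OK" on success | |||
| return err | |||
| } | |||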
| @@ -0,0 +1,140 @@ | |||
| package redis | |||
| import "time" | |||
| // NewCmdResult returns a Cmd initialised with val and err for testing | |||
| func NewCmdResult(val interface{}, err error) *Cmd { | |||
| var cmd Cmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewSliceResult returns a SliceCmd initialised with val and err for testing | |||
| func NewSliceResult(val []interface{}, err error) *SliceCmd { | |||
| var cmd SliceCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewStatusResult returns a StatusCmd initialised with val and err for testing | |||
| func NewStatusResult(val string, err error) *StatusCmd { | |||
| var cmd StatusCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewIntResult returns an IntCmd initialised with val and err for testing | |||
| func NewIntResult(val int64, err error) *IntCmd { | |||
| var cmd IntCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewDurationResult returns a DurationCmd initialised with val and err for testing | |||
| func NewDurationResult(val time.Duration, err error) *DurationCmd { | |||
| var cmd DurationCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewBoolResult returns a BoolCmd initialised with val and err for testing | |||
| func NewBoolResult(val bool, err error) *BoolCmd { | |||
| var cmd BoolCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewStringResult returns a StringCmd initialised with val and err for testing | |||
| func NewStringResult(val string, err error) *StringCmd { | |||
| var cmd StringCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewFloatResult returns a FloatCmd initialised with val and err for testing | |||
| func NewFloatResult(val float64, err error) *FloatCmd { | |||
| var cmd FloatCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing | |||
| func NewStringSliceResult(val []string, err error) *StringSliceCmd { | |||
| var cmd StringSliceCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing | |||
| func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { | |||
| var cmd BoolSliceCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing | |||
| func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd { | |||
| var cmd StringStringMapCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing | |||
| func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd { | |||
| var cmd StringIntMapCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing | |||
| func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd { | |||
| var cmd ZSliceCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewScanCmdResult returns a ScanCmd initialised with val and err for testing | |||
| func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd { | |||
| var cmd ScanCmd | |||
| cmd.page = keys | |||
| cmd.cursor = cursor | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing | |||
| func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd { | |||
| var cmd ClusterSlotsCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing | |||
| func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd { | |||
| var cmd GeoLocationCmd | |||
| cmd.locations = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
| // NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing | |||
| func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd { | |||
| var cmd CommandsInfoCmd | |||
| cmd.val = val | |||
| cmd.setErr(err) | |||
| return &cmd | |||
| } | |||
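| // A usage sketch (not part of the upstream file): the NewXxxResult helpers | |||
| // above let a unit test fabricate command replies without a live server. | |||
| // Assumes a _test file in this package importing "testing"; names are | |||
| // illustrative. | |||
| // | |||
| // func TestStubbedReply(t *testing.T) { | |||
| //     cmd := NewStringResult("hello", nil) | |||
| //     if got, err := cmd.Result(); err != nil || got != "hello" { | |||
| //         t.Fatalf("got %q, %v", got, err) | |||
| //     } | |||
| // } | |||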
| @@ -0,0 +1,658 @@ | |||
| package redis | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "math/rand" | |||
| "strconv" | |||
| "sync" | |||
| "sync/atomic" | |||
| "time" | |||
| "github.com/go-redis/redis/internal" | |||
| "github.com/go-redis/redis/internal/consistenthash" | |||
| "github.com/go-redis/redis/internal/hashtag" | |||
| "github.com/go-redis/redis/internal/pool" | |||
| ) | |||
| // Hash is the type of the hash function used in consistent hashing. | |||
| type Hash consistenthash.Hash | |||
| var errRingShardsDown = errors.New("redis: all ring shards are down") | |||
| // RingOptions are used to configure a ring client and should be | |||
| // passed to NewRing. | |||
| type RingOptions struct { | |||
| // Map of name => host:port addresses of ring shards. | |||
| Addrs map[string]string | |||
| // Frequency of PING commands sent to check shard availability. | |||
| // A shard is considered down after 3 consecutive failed checks. | |||
| HeartbeatFrequency time.Duration | |||
| // Hash function used in consistent hash. | |||
| // Default is crc32.ChecksumIEEE. | |||
| Hash Hash | |||
| // Number of replicas in consistent hash. | |||
| // Default is 100 replicas. | |||
| // | |||
| // A higher number of replicas gives less deviation, i.e. keys are | |||
| // distributed across nodes more evenly. | |||
| // | |||
| // The following table shows the deviation for common replica counts: | |||
| // -------------------------------------------------------- | |||
| // | nreplicas | standard error | 99% confidence interval | | |||
| // | 10 | 0.3152 | (0.37, 1.98) | | |||
| // | 100 | 0.0997 | (0.76, 1.28) | | |||
| // | 1000 | 0.0316 | (0.92, 1.09) | | |||
| // -------------------------------------------------------- | |||
| // | |||
| // See https://arxiv.org/abs/1406.2294 for reference | |||
| HashReplicas int | |||
| // The following options are copied from the Options struct. | |||
| OnConnect func(*Conn) error | |||
| DB int | |||
| Password string | |||
| MaxRetries int | |||
| MinRetryBackoff time.Duration | |||
| MaxRetryBackoff time.Duration | |||
| DialTimeout time.Duration | |||
| ReadTimeout time.Duration | |||
| WriteTimeout time.Duration | |||
| PoolSize int | |||
| MinIdleConns int | |||
| MaxConnAge time.Duration | |||
| PoolTimeout time.Duration | |||
| IdleTimeout time.Duration | |||
| IdleCheckFrequency time.Duration | |||
| } | |||
| func (opt *RingOptions) init() { | |||
| if opt.HeartbeatFrequency == 0 { | |||
| opt.HeartbeatFrequency = 500 * time.Millisecond | |||
| } | |||
| if opt.HashReplicas == 0 { | |||
| opt.HashReplicas = 100 | |||
| } | |||
| switch opt.MinRetryBackoff { | |||
| case -1: | |||
| opt.MinRetryBackoff = 0 | |||
| case 0: | |||
| opt.MinRetryBackoff = 8 * time.Millisecond | |||
| } | |||
| switch opt.MaxRetryBackoff { | |||
| case -1: | |||
| opt.MaxRetryBackoff = 0 | |||
| case 0: | |||
| opt.MaxRetryBackoff = 512 * time.Millisecond | |||
| } | |||
| } | |||
| func (opt *RingOptions) clientOptions() *Options { | |||
| return &Options{ | |||
| OnConnect: opt.OnConnect, | |||
| DB: opt.DB, | |||
| Password: opt.Password, | |||
| DialTimeout: opt.DialTimeout, | |||
| ReadTimeout: opt.ReadTimeout, | |||
| WriteTimeout: opt.WriteTimeout, | |||
| PoolSize: opt.PoolSize, | |||
| MinIdleConns: opt.MinIdleConns, | |||
| MaxConnAge: opt.MaxConnAge, | |||
| PoolTimeout: opt.PoolTimeout, | |||
| IdleTimeout: opt.IdleTimeout, | |||
| IdleCheckFrequency: opt.IdleCheckFrequency, | |||
| } | |||
| } | |||
| //------------------------------------------------------------------------------ | |||
| type ringShard struct { | |||
| Client *Client | |||
| down int32 | |||
| } | |||
| func (shard *ringShard) String() string { | |||
| var state string | |||
| if shard.IsUp() { | |||
| state = "up" | |||
| } else { | |||
| state = "down" | |||
| } | |||
| return fmt.Sprintf("%s is %s", shard.Client, state) | |||
| } | |||
| func (shard *ringShard) IsDown() bool { | |||
| const threshold = 3 | |||
| return atomic.LoadInt32(&shard.down) >= threshold | |||
| } | |||
| func (shard *ringShard) IsUp() bool { | |||
| return !shard.IsDown() | |||
| } | |||
| // Vote votes to set shard state and returns true if state was changed. | |||
| func (shard *ringShard) Vote(up bool) bool { | |||
| if up { | |||
| changed := shard.IsDown() | |||
| atomic.StoreInt32(&shard.down, 0) | |||
| return changed | |||
| } | |||
| if shard.IsDown() { | |||
| return false | |||
| } | |||
| atomic.AddInt32(&shard.down, 1) | |||
| return shard.IsDown() | |||
| } | |||
| //------------------------------------------------------------------------------ | |||
| type ringShards struct { | |||
| opt *RingOptions | |||
| mu sync.RWMutex | |||
| hash *consistenthash.Map | |||
| shards map[string]*ringShard // read only | |||
| list []*ringShard // read only | |||
| len int | |||
| closed bool | |||
| } | |||
| func newRingShards(opt *RingOptions) *ringShards { | |||
| return &ringShards{ | |||
| opt: opt, | |||
| hash: newConsistentHash(opt), | |||
| shards: make(map[string]*ringShard), | |||
| } | |||
| } | |||
| func (c *ringShards) Add(name string, cl *Client) { | |||
| shard := &ringShard{Client: cl} | |||
| c.hash.Add(name) | |||
| c.shards[name] = shard | |||
| c.list = append(c.list, shard) | |||
| } | |||
| func (c *ringShards) List() []*ringShard { | |||
| c.mu.RLock() | |||
| list := c.list | |||
| c.mu.RUnlock() | |||
| return list | |||
| } | |||
| func (c *ringShards) Hash(key string) string { | |||
| c.mu.RLock() | |||
| hash := c.hash.Get(key) | |||
| c.mu.RUnlock() | |||
| return hash | |||
| } | |||
| func (c *ringShards) GetByKey(key string) (*ringShard, error) { | |||
| key = hashtag.Key(key) | |||
| c.mu.RLock() | |||
| if c.closed { | |||
| c.mu.RUnlock() | |||
| return nil, pool.ErrClosed | |||
| } | |||
| hash := c.hash.Get(key) | |||
| if hash == "" { | |||
| c.mu.RUnlock() | |||
| return nil, errRingShardsDown | |||
| } | |||
| shard := c.shards[hash] | |||
| c.mu.RUnlock() | |||
| return shard, nil | |||
| } | |||
| func (c *ringShards) GetByHash(name string) (*ringShard, error) { | |||
| if name == "" { | |||
| return c.Random() | |||
| } | |||
| c.mu.RLock() | |||
| shard := c.shards[name] | |||
| c.mu.RUnlock() | |||
| return shard, nil | |||
| } | |||
| func (c *ringShards) Random() (*ringShard, error) { | |||
| return c.GetByKey(strconv.Itoa(rand.Int())) | |||
| } | |||
| // Heartbeat monitors the state of each shard in the ring. | |||
| func (c *ringShards) Heartbeat(frequency time.Duration) { | |||
| ticker := time.NewTicker(frequency) | |||
| defer ticker.Stop() | |||
| for range ticker.C { | |||
| var rebalance bool | |||
| c.mu.RLock() | |||
| if c.closed { | |||
| c.mu.RUnlock() | |||
| break | |||
| } | |||
| shards := c.list | |||
| c.mu.RUnlock() | |||
| for _, shard := range shards { | |||
| err := shard.Client.Ping().Err() | |||
| if shard.Vote(err == nil || err == pool.ErrPoolTimeout) { | |||
| internal.Logf("ring shard state changed: %s", shard) | |||
| rebalance = true | |||
| } | |||
| } | |||
| if rebalance { | |||
| c.rebalance() | |||
| } | |||
| } | |||
| } | |||
| // rebalance removes dead shards from the Ring. | |||
| func (c *ringShards) rebalance() { | |||
| hash := newConsistentHash(c.opt) | |||
| var shardsNum int | |||
| for name, shard := range c.shards { | |||
| if shard.IsUp() { | |||
| hash.Add(name) | |||
| shardsNum++ | |||
| } | |||
| } | |||
| c.mu.Lock() | |||
| c.hash = hash | |||
| c.len = shardsNum | |||
| c.mu.Unlock() | |||
| } | |||
| func (c *ringShards) Len() int { | |||
| c.mu.RLock() | |||
| l := c.len | |||
| c.mu.RUnlock() | |||
| return l | |||
| } | |||
| func (c *ringShards) Close() error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| if c.closed { | |||
| return nil | |||
| } | |||
| c.closed = true | |||
| var firstErr error | |||
| for _, shard := range c.shards { | |||
| if err := shard.Client.Close(); err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| c.hash = nil | |||
| c.shards = nil | |||
| c.list = nil | |||
| return firstErr | |||
| } | |||
| //------------------------------------------------------------------------------ | |||
| // Ring is a Redis client that uses consistent hashing to distribute | |||
| // keys across multiple Redis servers (shards). It's safe for | |||
| // concurrent use by multiple goroutines. | |||
| // | |||
| // Ring monitors the state of each shard and removes dead shards from | |||
| // the ring. When a shard comes online it is added back to the ring. This | |||
| // gives you maximum availability and partition tolerance, but no | |||
| // consistency between different shards or even clients. Each client | |||
| // uses shards that are available to the client and does not do any | |||
| // coordination when shard state is changed. | |||
| // | |||
| // Ring should be used when you need multiple Redis servers for caching | |||
| // and can tolerate losing data when one of the servers dies. | |||
| // Otherwise you should use Redis Cluster. | |||
| type Ring struct { | |||
| cmdable | |||
| ctx context.Context | |||
| opt *RingOptions | |||
| shards *ringShards | |||
| cmdsInfoCache *cmdsInfoCache | |||
| process func(Cmder) error | |||
| processPipeline func([]Cmder) error | |||
| } | |||
| func NewRing(opt *RingOptions) *Ring { | |||
| opt.init() | |||
| ring := &Ring{ | |||
| opt: opt, | |||
| shards: newRingShards(opt), | |||
| } | |||
| ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo) | |||
| ring.process = ring.defaultProcess | |||
| ring.processPipeline = ring.defaultProcessPipeline | |||
| ring.cmdable.setProcessor(ring.Process) | |||
| for name, addr := range opt.Addrs { | |||
| clopt := opt.clientOptions() | |||
| clopt.Addr = addr | |||
| ring.shards.Add(name, NewClient(clopt)) | |||
| } | |||
| go ring.shards.Heartbeat(opt.HeartbeatFrequency) | |||
| return ring | |||
| } | |||
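| // A usage sketch: constructing a two-shard ring. Shard names and addresses | |||
| // are placeholders; the zero expiration means the key never expires. | |||
| // | |||
| // ring := NewRing(&RingOptions{ | |||
| //     Addrs: map[string]string{ | |||
| //         "shard1": "127.0.0.1:6379", | |||
| //         "shard2": "127.0.0.1:6380", | |||
| //     }, | |||
| // }) | |||
| // err := ring.Set("key", "value", 0).Err() | |||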
| func (c *Ring) Context() context.Context { | |||
| if c.ctx != nil { | |||
| return c.ctx | |||
| } | |||
| return context.Background() | |||
| } | |||
| func (c *Ring) WithContext(ctx context.Context) *Ring { | |||
| if ctx == nil { | |||
| panic("nil context") | |||
| } | |||
| c2 := c.copy() | |||
| c2.ctx = ctx | |||
| return c2 | |||
| } | |||
| func (c *Ring) copy() *Ring { | |||
| cp := *c | |||
| return &cp | |||
| } | |||
| // Options returns read-only Options that were used to create the client. | |||
| func (c *Ring) Options() *RingOptions { | |||
| return c.opt | |||
| } | |||
| func (c *Ring) retryBackoff(attempt int) time.Duration { | |||
| return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) | |||
| } | |||
| // PoolStats returns accumulated connection pool stats. | |||
| func (c *Ring) PoolStats() *PoolStats { | |||
| shards := c.shards.List() | |||
| var acc PoolStats | |||
| for _, shard := range shards { | |||
| s := shard.Client.connPool.Stats() | |||
| acc.Hits += s.Hits | |||
| acc.Misses += s.Misses | |||
| acc.Timeouts += s.Timeouts | |||
| acc.TotalConns += s.TotalConns | |||
| acc.IdleConns += s.IdleConns | |||
| } | |||
| return &acc | |||
| } | |||
| // Len returns the current number of shards in the ring. | |||
| func (c *Ring) Len() int { | |||
| return c.shards.Len() | |||
| } | |||
| // Subscribe subscribes the client to the specified channels. | |||
| func (c *Ring) Subscribe(channels ...string) *PubSub { | |||
| if len(channels) == 0 { | |||
| panic("at least one channel is required") | |||
| } | |||
| shard, err := c.shards.GetByKey(channels[0]) | |||
| if err != nil { | |||
| // TODO: return PubSub with sticky error | |||
| panic(err) | |||
| } | |||
| return shard.Client.Subscribe(channels...) | |||
| } | |||
| // PSubscribe subscribes the client to the given patterns. | |||
| func (c *Ring) PSubscribe(channels ...string) *PubSub { | |||
| if len(channels) == 0 { | |||
| panic("at least one channel is required") | |||
| } | |||
| shard, err := c.shards.GetByKey(channels[0]) | |||
| if err != nil { | |||
| // TODO: return PubSub with sticky error | |||
| panic(err) | |||
| } | |||
| return shard.Client.PSubscribe(channels...) | |||
| } | |||
| // ForEachShard concurrently calls fn on each live shard in the ring. | |||
| // It returns the first error, if any. | |||
| func (c *Ring) ForEachShard(fn func(client *Client) error) error { | |||
| shards := c.shards.List() | |||
| var wg sync.WaitGroup | |||
| errCh := make(chan error, 1) | |||
| for _, shard := range shards { | |||
| if shard.IsDown() { | |||
| continue | |||
| } | |||
| wg.Add(1) | |||
| go func(shard *ringShard) { | |||
| defer wg.Done() | |||
| err := fn(shard.Client) | |||
| if err != nil { | |||
| select { | |||
| case errCh <- err: | |||
| default: | |||
| } | |||
| } | |||
| }(shard) | |||
| } | |||
| wg.Wait() | |||
| select { | |||
| case err := <-errCh: | |||
| return err | |||
| default: | |||
| return nil | |||
| } | |||
| } | |||
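| // An illustrative helper (not part of the upstream API): ping every live | |||
| // shard and surface the first failure, the typical use of ForEachShard. | |||
| func pingShards(c *Ring) error { | |||
| return c.ForEachShard(func(shard *Client) error { | |||
| return shard.Ping().Err() | |||
| }) | |||
| } | |||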
| func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) { | |||
| shards := c.shards.List() | |||
| var firstErr error | |||
| for _, shard := range shards { | |||
| cmdsInfo, err := shard.Client.Command().Result() | |||
| if err == nil { | |||
| return cmdsInfo, nil | |||
| } | |||
| if firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| } | |||
| if firstErr == nil { | |||
| firstErr = errRingShardsDown | |||
| } | |||
| return nil, firstErr | |||
| } | |||
| func (c *Ring) cmdInfo(name string) *CommandInfo { | |||
| cmdsInfo, err := c.cmdsInfoCache.Get() | |||
| if err != nil { | |||
| return nil | |||
| } | |||
| info := cmdsInfo[name] | |||
| if info == nil { | |||
| internal.Logf("info for cmd=%s not found", name) | |||
| } | |||
| return info | |||
| } | |||
| func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) { | |||
| cmdInfo := c.cmdInfo(cmd.Name()) | |||
| pos := cmdFirstKeyPos(cmd, cmdInfo) | |||
| if pos == 0 { | |||
| return c.shards.Random() | |||
| } | |||
| firstKey := cmd.stringArg(pos) | |||
| return c.shards.GetByKey(firstKey) | |||
| } | |||
| // Do creates a Cmd from the args and processes the cmd. | |||
| func (c *Ring) Do(args ...interface{}) *Cmd { | |||
| cmd := NewCmd(args...) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
| func (c *Ring) WrapProcess( | |||
| fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error, | |||
| ) { | |||
| c.process = fn(c.process) | |||
| } | |||
| func (c *Ring) Process(cmd Cmder) error { | |||
| return c.process(cmd) | |||
| } | |||
| func (c *Ring) defaultProcess(cmd Cmder) error { | |||
| for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { | |||
| if attempt > 0 { | |||
| time.Sleep(c.retryBackoff(attempt)) | |||
| } | |||
| shard, err := c.cmdShard(cmd) | |||
| if err != nil { | |||
| cmd.setErr(err) | |||
| return err | |||
| } | |||
| err = shard.Client.Process(cmd) | |||
| if err == nil { | |||
| return nil | |||
| } | |||
| if !internal.IsRetryableError(err, cmd.readTimeout() == nil) { | |||
| return err | |||
| } | |||
| } | |||
| return cmd.Err() | |||
| } | |||
| func (c *Ring) Pipeline() Pipeliner { | |||
| pipe := Pipeline{ | |||
| exec: c.processPipeline, | |||
| } | |||
| pipe.cmdable.setProcessor(pipe.Process) | |||
| return &pipe | |||
| } | |||
| func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.Pipeline().Pipelined(fn) | |||
| } | |||
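| // A usage sketch: queueing two writes in one pipeline. The keys are | |||
| // placeholders and may hash to different shards; defaultProcessPipeline | |||
| // below groups the queued commands per shard before sending them. | |||
| // | |||
| // cmds, err := ring.Pipelined(func(pipe Pipeliner) error { | |||
| //     pipe.Set("a", "1", 0) | |||
| //     pipe.Set("b", "2", 0) | |||
| //     return nil | |||
| // }) | |||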
| func (c *Ring) WrapProcessPipeline( | |||
| fn func(oldProcess func([]Cmder) error) func([]Cmder) error, | |||
| ) { | |||
| c.processPipeline = fn(c.processPipeline) | |||
| } | |||
| func (c *Ring) defaultProcessPipeline(cmds []Cmder) error { | |||
| cmdsMap := make(map[string][]Cmder) | |||
| for _, cmd := range cmds { | |||
| cmdInfo := c.cmdInfo(cmd.Name()) | |||
| hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo)) | |||
| if hash != "" { | |||
| hash = c.shards.Hash(hashtag.Key(hash)) | |||
| } | |||
| cmdsMap[hash] = append(cmdsMap[hash], cmd) | |||
| } | |||
| for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { | |||
| if attempt > 0 { | |||
| time.Sleep(c.retryBackoff(attempt)) | |||
| } | |||
| var mu sync.Mutex | |||
| var failedCmdsMap map[string][]Cmder | |||
| var wg sync.WaitGroup | |||
| for hash, cmds := range cmdsMap { | |||
| wg.Add(1) | |||
| go func(hash string, cmds []Cmder) { | |||
| defer wg.Done() | |||
| shard, err := c.shards.GetByHash(hash) | |||
| if err != nil { | |||
| setCmdsErr(cmds, err) | |||
| return | |||
| } | |||
| cn, err := shard.Client.getConn() | |||
| if err != nil { | |||
| setCmdsErr(cmds, err) | |||
| return | |||
| } | |||
| canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds) | |||
| shard.Client.releaseConnStrict(cn, err) | |||
| if canRetry && internal.IsRetryableError(err, true) { | |||
| mu.Lock() | |||
| if failedCmdsMap == nil { | |||
| failedCmdsMap = make(map[string][]Cmder) | |||
| } | |||
| failedCmdsMap[hash] = cmds | |||
| mu.Unlock() | |||
| } | |||
| }(hash, cmds) | |||
| } | |||
| wg.Wait() | |||
| if len(failedCmdsMap) == 0 { | |||
| break | |||
| } | |||
| cmdsMap = failedCmdsMap | |||
| } | |||
| return cmdsFirstErr(cmds) | |||
| } | |||
| func (c *Ring) TxPipeline() Pipeliner { | |||
| panic("not implemented") | |||
| } | |||
| func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| panic("not implemented") | |||
| } | |||
| // Close closes the ring client, releasing any open resources. | |||
| // | |||
| // It is rare to Close a Ring, as the Ring is meant to be long-lived | |||
| // and shared between many goroutines. | |||
| func (c *Ring) Close() error { | |||
| return c.shards.Close() | |||
| } | |||
| func newConsistentHash(opt *RingOptions) *consistenthash.Map { | |||
| return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash)) | |||
| } | |||
| @@ -0,0 +1,62 @@ | |||
| package redis | |||
| import ( | |||
| "crypto/sha1" | |||
| "encoding/hex" | |||
| "io" | |||
| "strings" | |||
| ) | |||
| type scripter interface { | |||
| Eval(script string, keys []string, args ...interface{}) *Cmd | |||
| EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd | |||
| ScriptExists(hashes ...string) *BoolSliceCmd | |||
| ScriptLoad(script string) *StringCmd | |||
| } | |||
| var _ scripter = (*Client)(nil) | |||
| var _ scripter = (*Ring)(nil) | |||
| var _ scripter = (*ClusterClient)(nil) | |||
| type Script struct { | |||
| src, hash string | |||
| } | |||
| func NewScript(src string) *Script { | |||
| h := sha1.New() | |||
| io.WriteString(h, src) | |||
| return &Script{ | |||
| src: src, | |||
| hash: hex.EncodeToString(h.Sum(nil)), | |||
| } | |||
| } | |||
| func (s *Script) Hash() string { | |||
| return s.hash | |||
| } | |||
| func (s *Script) Load(c scripter) *StringCmd { | |||
| return c.ScriptLoad(s.src) | |||
| } | |||
| func (s *Script) Exists(c scripter) *BoolSliceCmd { | |||
| return c.ScriptExists(s.hash) | |||
| } | |||
| func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd { | |||
| return c.Eval(s.src, keys, args...) | |||
| } | |||
| func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd { | |||
| return c.EvalSha(s.hash, keys, args...) | |||
| } | |||
| // Run optimistically uses EVALSHA to run the script. If the script does not | |||
| // exist, it is retried using EVAL. | |||
| func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd { | |||
| r := s.EvalSha(c, keys, args...) | |||
| if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { | |||
| return s.Eval(c, keys, args...) | |||
| } | |||
| return r | |||
| } | |||
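| // A usage sketch: a one-line Lua script executed via Run, which tries | |||
| // EVALSHA first and falls back to EVAL on NOSCRIPT. The key name and | |||
| // increment are illustrative. | |||
| // | |||
| // var incrBy = NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`) | |||
| // n, err := incrBy.Run(client, []string{"counter"}, 5).Int64() | |||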
| @@ -0,0 +1,399 @@ | |||
| package redis | |||
| import ( | |||
| "crypto/tls" | |||
| "errors" | |||
| "net" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "github.com/go-redis/redis/internal" | |||
| "github.com/go-redis/redis/internal/pool" | |||
| ) | |||
| //------------------------------------------------------------------------------ | |||
| // FailoverOptions are used to configure a failover client and should | |||
| // be passed to NewFailoverClient. | |||
| type FailoverOptions struct { | |||
| // The master name. | |||
| MasterName string | |||
| // A seed list of host:port addresses of sentinel nodes. | |||
| SentinelAddrs []string | |||
| // The following options are copied from the Options struct. | |||
| OnConnect func(*Conn) error | |||
| Password string | |||
| DB int | |||
| MaxRetries int | |||
| MinRetryBackoff time.Duration | |||
| MaxRetryBackoff time.Duration | |||
| DialTimeout time.Duration | |||
| ReadTimeout time.Duration | |||
| WriteTimeout time.Duration | |||
| PoolSize int | |||
| MinIdleConns int | |||
| MaxConnAge time.Duration | |||
| PoolTimeout time.Duration | |||
| IdleTimeout time.Duration | |||
| IdleCheckFrequency time.Duration | |||
| TLSConfig *tls.Config | |||
| } | |||
| func (opt *FailoverOptions) options() *Options { | |||
| return &Options{ | |||
| Addr: "FailoverClient", | |||
| OnConnect: opt.OnConnect, | |||
| DB: opt.DB, | |||
| Password: opt.Password, | |||
| MaxRetries: opt.MaxRetries, | |||
| DialTimeout: opt.DialTimeout, | |||
| ReadTimeout: opt.ReadTimeout, | |||
| WriteTimeout: opt.WriteTimeout, | |||
| PoolSize: opt.PoolSize, | |||
| PoolTimeout: opt.PoolTimeout, | |||
| IdleTimeout: opt.IdleTimeout, | |||
| IdleCheckFrequency: opt.IdleCheckFrequency, | |||
| TLSConfig: opt.TLSConfig, | |||
| } | |||
| } | |||
| // NewFailoverClient returns a Redis client that uses Redis Sentinel | |||
| // for automatic failover. It's safe for concurrent use by multiple | |||
| // goroutines. | |||
| func NewFailoverClient(failoverOpt *FailoverOptions) *Client { | |||
| opt := failoverOpt.options() | |||
| opt.init() | |||
| failover := &sentinelFailover{ | |||
| masterName: failoverOpt.MasterName, | |||
| sentinelAddrs: failoverOpt.SentinelAddrs, | |||
| opt: opt, | |||
| } | |||
| c := Client{ | |||
| baseClient: baseClient{ | |||
| opt: opt, | |||
| connPool: failover.Pool(), | |||
| onClose: func() error { | |||
| return failover.Close() | |||
| }, | |||
| }, | |||
| } | |||
| c.baseClient.init() | |||
| c.cmdable.setProcessor(c.Process) | |||
| return &c | |||
| } | |||
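| // A usage sketch: a sentinel-backed client. The master name and sentinel | |||
| // address are deployment-specific placeholders. | |||
| // | |||
| // client := NewFailoverClient(&FailoverOptions{ | |||
| //     MasterName:    "mymaster", | |||
| //     SentinelAddrs: []string{"127.0.0.1:26379"}, | |||
| // }) | |||
| // err := client.Ping().Err() | |||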
| //------------------------------------------------------------------------------ | |||
| type SentinelClient struct { | |||
| baseClient | |||
| } | |||
| func NewSentinelClient(opt *Options) *SentinelClient { | |||
| opt.init() | |||
| c := &SentinelClient{ | |||
| baseClient: baseClient{ | |||
| opt: opt, | |||
| connPool: newConnPool(opt), | |||
| }, | |||
| } | |||
| c.baseClient.init() | |||
| return c | |||
| } | |||
| func (c *SentinelClient) pubSub() *PubSub { | |||
| pubsub := &PubSub{ | |||
| opt: c.opt, | |||
| newConn: func(channels []string) (*pool.Conn, error) { | |||
| return c.newConn() | |||
| }, | |||
| closeConn: c.connPool.CloseConn, | |||
| } | |||
| pubsub.init() | |||
| return pubsub | |||
| } | |||
| // Subscribe subscribes the client to the specified channels. | |||
| // Channels can be omitted to create an empty subscription. | |||
| func (c *SentinelClient) Subscribe(channels ...string) *PubSub { | |||
| pubsub := c.pubSub() | |||
| if len(channels) > 0 { | |||
| _ = pubsub.Subscribe(channels...) | |||
| } | |||
| return pubsub | |||
| } | |||
| // PSubscribe subscribes the client to the given patterns. | |||
| // Patterns can be omitted to create an empty subscription. | |||
| func (c *SentinelClient) PSubscribe(channels ...string) *PubSub { | |||
| pubsub := c.pubSub() | |||
| if len(channels) > 0 { | |||
| _ = pubsub.PSubscribe(channels...) | |||
| } | |||
| return pubsub | |||
| } | |||
| func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd { | |||
| cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
| func (c *SentinelClient) Sentinels(name string) *SliceCmd { | |||
| cmd := NewSliceCmd("sentinel", "sentinels", name) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
| // Failover forces a failover as if the master was not reachable, without | |||
| // asking other Sentinels for agreement. | |||
| func (c *SentinelClient) Failover(name string) *StatusCmd { | |||
| cmd := NewStatusCmd("sentinel", "failover", name) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
| // Reset resets all the masters with matching name. The pattern argument is a | |||
| // glob-style pattern. The reset process clears any previous state in a master | |||
| // (including a failover in progress), and removes every slave and sentinel | |||
| // already discovered and associated with the master. | |||
| func (c *SentinelClient) Reset(pattern string) *IntCmd { | |||
| cmd := NewIntCmd("sentinel", "reset", pattern) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
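| // An illustrative helper (not part of the upstream API): ask a sentinel for | |||
| // the current master and join the two-element host/port reply, exactly as | |||
| // masterAddr below does. | |||
| func currentMaster(sc *SentinelClient, name string) (string, error) { | |||
| addr, err := sc.GetMasterAddrByName(name).Result() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| return net.JoinHostPort(addr[0], addr[1]), nil | |||
| } | |||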
| type sentinelFailover struct { | |||
| sentinelAddrs []string | |||
| opt *Options | |||
| pool *pool.ConnPool | |||
| poolOnce sync.Once | |||
| mu sync.RWMutex | |||
| masterName string | |||
| _masterAddr string | |||
| sentinel *SentinelClient | |||
| pubsub *PubSub | |||
| } | |||
| func (c *sentinelFailover) Close() error { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| if c.sentinel != nil { | |||
| return c.closeSentinel() | |||
| } | |||
| return nil | |||
| } | |||
| func (c *sentinelFailover) Pool() *pool.ConnPool { | |||
| c.poolOnce.Do(func() { | |||
| c.opt.Dialer = c.dial | |||
| c.pool = newConnPool(c.opt) | |||
| }) | |||
| return c.pool | |||
| } | |||
| func (c *sentinelFailover) dial() (net.Conn, error) { | |||
| addr, err := c.MasterAddr() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return net.DialTimeout("tcp", addr, c.opt.DialTimeout) | |||
| } | |||
| func (c *sentinelFailover) MasterAddr() (string, error) { | |||
| addr, err := c.masterAddr() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| c.switchMaster(addr) | |||
| return addr, nil | |||
| } | |||
| func (c *sentinelFailover) masterAddr() (string, error) { | |||
| addr := c.getMasterAddr() | |||
| if addr != "" { | |||
| return addr, nil | |||
| } | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| for i, sentinelAddr := range c.sentinelAddrs { | |||
| sentinel := NewSentinelClient(&Options{ | |||
| Addr: sentinelAddr, | |||
| MaxRetries: c.opt.MaxRetries, | |||
| DialTimeout: c.opt.DialTimeout, | |||
| ReadTimeout: c.opt.ReadTimeout, | |||
| WriteTimeout: c.opt.WriteTimeout, | |||
| PoolSize: c.opt.PoolSize, | |||
| PoolTimeout: c.opt.PoolTimeout, | |||
| IdleTimeout: c.opt.IdleTimeout, | |||
| IdleCheckFrequency: c.opt.IdleCheckFrequency, | |||
| TLSConfig: c.opt.TLSConfig, | |||
| }) | |||
| masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result() | |||
| if err != nil { | |||
| internal.Logf("sentinel: GetMasterAddrByName master=%q failed: %s", | |||
| c.masterName, err) | |||
| _ = sentinel.Close() | |||
| continue | |||
| } | |||
| // Push working sentinel to the top. | |||
| c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] | |||
| c.setSentinel(sentinel) | |||
| addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) | |||
| return addr, nil | |||
| } | |||
| return "", errors.New("redis: all sentinels are unreachable") | |||
| } | |||
| func (c *sentinelFailover) getMasterAddr() string { | |||
| c.mu.RLock() | |||
| sentinel := c.sentinel | |||
| c.mu.RUnlock() | |||
| if sentinel == nil { | |||
| return "" | |||
| } | |||
| addr, err := sentinel.GetMasterAddrByName(c.masterName).Result() | |||
| if err != nil { | |||
| internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s", | |||
| c.masterName, err) | |||
| c.mu.Lock() | |||
| if c.sentinel == sentinel { | |||
| c.closeSentinel() | |||
| } | |||
| c.mu.Unlock() | |||
| return "" | |||
| } | |||
| return net.JoinHostPort(addr[0], addr[1]) | |||
| } | |||
| func (c *sentinelFailover) switchMaster(addr string) { | |||
| c.mu.RLock() | |||
| masterAddr := c._masterAddr | |||
| c.mu.RUnlock() | |||
| if masterAddr == addr { | |||
| return | |||
| } | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| internal.Logf("sentinel: new master=%q addr=%q", | |||
| c.masterName, addr) | |||
| _ = c.Pool().Filter(func(cn *pool.Conn) bool { | |||
| return cn.RemoteAddr().String() != addr | |||
| }) | |||
| c._masterAddr = addr | |||
| } | |||
| func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) { | |||
| c.discoverSentinels(sentinel) | |||
| c.sentinel = sentinel | |||
| c.pubsub = sentinel.Subscribe("+switch-master") | |||
| go c.listen(c.pubsub) | |||
| } | |||
| func (c *sentinelFailover) closeSentinel() error { | |||
| var firstErr error | |||
| err := c.pubsub.Close() | |||
| if err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| c.pubsub = nil | |||
| err = c.sentinel.Close() | |||
| if err != nil && firstErr == nil { | |||
| firstErr = err | |||
| } | |||
| c.sentinel = nil | |||
| return firstErr | |||
| } | |||
| func (c *sentinelFailover) discoverSentinels(sentinel *SentinelClient) { | |||
| sentinels, err := sentinel.Sentinels(c.masterName).Result() | |||
| if err != nil { | |||
| internal.Logf("sentinel: Sentinels master=%q failed: %s", c.masterName, err) | |||
| return | |||
| } | |||
| for _, sentinel := range sentinels { | |||
| vals := sentinel.([]interface{}) | |||
| for i := 0; i < len(vals); i += 2 { | |||
| key := vals[i].(string) | |||
| if key == "name" { | |||
| sentinelAddr := vals[i+1].(string) | |||
| if !contains(c.sentinelAddrs, sentinelAddr) { | |||
| internal.Logf("sentinel: discovered new sentinel=%q for master=%q", | |||
| sentinelAddr, c.masterName) | |||
| c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr) | |||
| } | |||
| } | |||
| } | |||
| } | |||
| } | |||
| func (c *sentinelFailover) listen(pubsub *PubSub) { | |||
| ch := pubsub.Channel() | |||
| for { | |||
| msg, ok := <-ch | |||
| if !ok { | |||
| break | |||
| } | |||
| switch msg.Channel { | |||
| case "+switch-master": | |||
| parts := strings.Split(msg.Payload, " ") | |||
| if parts[0] != c.masterName { | |||
| internal.Logf("sentinel: ignore addr for master=%q", parts[0]) | |||
| continue | |||
| } | |||
| addr := net.JoinHostPort(parts[3], parts[4]) | |||
| c.switchMaster(addr) | |||
| } | |||
| } | |||
| } | |||
| func contains(slice []string, str string) bool { | |||
| for _, s := range slice { | |||
| if s == str { | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
| @@ -0,0 +1,110 @@ | |||
| package redis | |||
| import ( | |||
| "github.com/go-redis/redis/internal/pool" | |||
| "github.com/go-redis/redis/internal/proto" | |||
| ) | |||
| // TxFailedErr is returned by Exec when the Redis transaction failed. | |||
| const TxFailedErr = proto.RedisError("redis: transaction failed") | |||
| // Tx implements Redis transactions as described in | |||
| // http://redis.io/topics/transactions. It's NOT safe for concurrent use | |||
| // by multiple goroutines, because Exec resets the list of watched keys. | |||
| // If you don't need WATCH it is better to use Pipeline. | |||
| type Tx struct { | |||
| statefulCmdable | |||
| baseClient | |||
| } | |||
| func (c *Client) newTx() *Tx { | |||
| tx := Tx{ | |||
| baseClient: baseClient{ | |||
| opt: c.opt, | |||
| connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true), | |||
| }, | |||
| } | |||
| tx.baseClient.init() | |||
| tx.statefulCmdable.setProcessor(tx.Process) | |||
| return &tx | |||
| } | |||
| // Watch prepares a transaction and marks the keys to be watched | |||
| // for conditional execution if there are any keys. | |||
| // | |||
| // The transaction is automatically closed when fn exits. | |||
| func (c *Client) Watch(fn func(*Tx) error, keys ...string) error { | |||
| tx := c.newTx() | |||
| if len(keys) > 0 { | |||
| if err := tx.Watch(keys...).Err(); err != nil { | |||
| _ = tx.Close() | |||
| return err | |||
| } | |||
| } | |||
| err := fn(tx) | |||
| _ = tx.Close() | |||
| return err | |||
| } | |||
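| // An illustrative helper (not part of the upstream API) showing the usual | |||
| // check-and-set loop built on Watch: read under WATCH, write inside a | |||
| // transactional pipeline; callers retry when Exec returns TxFailedErr. | |||
| // The key handling is deliberately minimal. | |||
| func incrWatched(c *Client, key string) error { | |||
| return c.Watch(func(tx *Tx) error { | |||
| n, err := tx.Get(key).Int64() | |||
| if err != nil && err != Nil { | |||
| return err | |||
| } | |||
| _, err = tx.TxPipelined(func(pipe Pipeliner) error { | |||
| pipe.Set(key, n+1, 0) | |||
| return nil | |||
| }) | |||
| return err | |||
| }, key) | |||
| } | |||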
| // Close closes the transaction, releasing any open resources. | |||
| func (c *Tx) Close() error { | |||
| _ = c.Unwatch().Err() | |||
| return c.baseClient.Close() | |||
| } | |||
| // Watch marks the keys to be watched for conditional execution | |||
| // of a transaction. | |||
| func (c *Tx) Watch(keys ...string) *StatusCmd { | |||
| args := make([]interface{}, 1+len(keys)) | |||
| args[0] = "watch" | |||
| for i, key := range keys { | |||
| args[1+i] = key | |||
| } | |||
| cmd := NewStatusCmd(args...) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
| // Unwatch flushes all the previously watched keys for a transaction. | |||
| func (c *Tx) Unwatch(keys ...string) *StatusCmd { | |||
| args := make([]interface{}, 1+len(keys)) | |||
| args[0] = "unwatch" | |||
| for i, key := range keys { | |||
| args[1+i] = key | |||
| } | |||
| cmd := NewStatusCmd(args...) | |||
| c.Process(cmd) | |||
| return cmd | |||
| } | |||
| // Pipeline creates a new pipeline. It is more convenient to use Pipelined. | |||
| func (c *Tx) Pipeline() Pipeliner { | |||
| pipe := Pipeline{ | |||
| exec: c.processTxPipeline, | |||
| } | |||
| pipe.statefulCmdable.setProcessor(pipe.Process) | |||
| return &pipe | |||
| } | |||
| // Pipelined executes the commands queued in fn inside a transaction. | |||
| // | |||
| // When using WATCH, EXEC will execute commands only if the watched keys | |||
| // were not modified, allowing for a check-and-set mechanism. | |||
| // | |||
| // Exec always returns a list of commands. If the transaction fails, | |||
| // TxFailedErr is returned. Otherwise Exec returns the error of the first | |||
| // failed command, or nil. | |||
| func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.Pipeline().Pipelined(fn) | |||
| } | |||
| // TxPipelined is an alias for Pipelined. | |||
| func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { | |||
| return c.Pipelined(fn) | |||
| } | |||
| // TxPipeline is an alias for Pipeline. | |||
| func (c *Tx) TxPipeline() Pipeliner { | |||
| return c.Pipeline() | |||
| } | |||
| @@ -0,0 +1,179 @@ | |||
| package redis | |||
| import ( | |||
| "crypto/tls" | |||
| "time" | |||
| ) | |||
| // UniversalOptions carries the information required by UniversalClient to | |||
| // establish connections. | |||
| type UniversalOptions struct { | |||
| // Either a single address or a seed list of host:port addresses | |||
| // of cluster/sentinel nodes. | |||
| Addrs []string | |||
| // Database to be selected after connecting to the server. | |||
| // Only single-node and failover clients. | |||
| DB int | |||
| // Common options. | |||
| OnConnect func(*Conn) error | |||
| Password string | |||
| MaxRetries int | |||
| MinRetryBackoff time.Duration | |||
| MaxRetryBackoff time.Duration | |||
| DialTimeout time.Duration | |||
| ReadTimeout time.Duration | |||
| WriteTimeout time.Duration | |||
| PoolSize int | |||
| MinIdleConns int | |||
| MaxConnAge time.Duration | |||
| PoolTimeout time.Duration | |||
| IdleTimeout time.Duration | |||
| IdleCheckFrequency time.Duration | |||
| TLSConfig *tls.Config | |||
| // Only cluster clients. | |||
| MaxRedirects int | |||
| ReadOnly bool | |||
| RouteByLatency bool | |||
| RouteRandomly bool | |||
| // The sentinel master name. | |||
| // Only failover clients. | |||
| MasterName string | |||
| } | |||
| func (o *UniversalOptions) cluster() *ClusterOptions { | |||
| if len(o.Addrs) == 0 { | |||
| o.Addrs = []string{"127.0.0.1:6379"} | |||
| } | |||
| return &ClusterOptions{ | |||
| Addrs: o.Addrs, | |||
| OnConnect: o.OnConnect, | |||
| Password: o.Password, | |||
| MaxRedirects: o.MaxRedirects, | |||
| ReadOnly: o.ReadOnly, | |||
| RouteByLatency: o.RouteByLatency, | |||
| RouteRandomly: o.RouteRandomly, | |||
| MaxRetries: o.MaxRetries, | |||
| MinRetryBackoff: o.MinRetryBackoff, | |||
| MaxRetryBackoff: o.MaxRetryBackoff, | |||
| DialTimeout: o.DialTimeout, | |||
| ReadTimeout: o.ReadTimeout, | |||
| WriteTimeout: o.WriteTimeout, | |||
| PoolSize: o.PoolSize, | |||
| MinIdleConns: o.MinIdleConns, | |||
| MaxConnAge: o.MaxConnAge, | |||
| PoolTimeout: o.PoolTimeout, | |||
| IdleTimeout: o.IdleTimeout, | |||
| IdleCheckFrequency: o.IdleCheckFrequency, | |||
| TLSConfig: o.TLSConfig, | |||
| } | |||
| } | |||
| func (o *UniversalOptions) failover() *FailoverOptions { | |||
| if len(o.Addrs) == 0 { | |||
| o.Addrs = []string{"127.0.0.1:26379"} | |||
| } | |||
| return &FailoverOptions{ | |||
| SentinelAddrs: o.Addrs, | |||
| MasterName: o.MasterName, | |||
| OnConnect: o.OnConnect, | |||
| DB: o.DB, | |||
| Password: o.Password, | |||
| MaxRetries: o.MaxRetries, | |||
| MinRetryBackoff: o.MinRetryBackoff, | |||
| MaxRetryBackoff: o.MaxRetryBackoff, | |||
| DialTimeout: o.DialTimeout, | |||
| ReadTimeout: o.ReadTimeout, | |||
| WriteTimeout: o.WriteTimeout, | |||
| PoolSize: o.PoolSize, | |||
| MinIdleConns: o.MinIdleConns, | |||
| MaxConnAge: o.MaxConnAge, | |||
| PoolTimeout: o.PoolTimeout, | |||
| IdleTimeout: o.IdleTimeout, | |||
| IdleCheckFrequency: o.IdleCheckFrequency, | |||
| TLSConfig: o.TLSConfig, | |||
| } | |||
| } | |||
| func (o *UniversalOptions) simple() *Options { | |||
| addr := "127.0.0.1:6379" | |||
| if len(o.Addrs) > 0 { | |||
| addr = o.Addrs[0] | |||
| } | |||
| return &Options{ | |||
| Addr: addr, | |||
| OnConnect: o.OnConnect, | |||
| DB: o.DB, | |||
| Password: o.Password, | |||
| MaxRetries: o.MaxRetries, | |||
| MinRetryBackoff: o.MinRetryBackoff, | |||
| MaxRetryBackoff: o.MaxRetryBackoff, | |||
| DialTimeout: o.DialTimeout, | |||
| ReadTimeout: o.ReadTimeout, | |||
| WriteTimeout: o.WriteTimeout, | |||
| PoolSize: o.PoolSize, | |||
| MinIdleConns: o.MinIdleConns, | |||
| MaxConnAge: o.MaxConnAge, | |||
| PoolTimeout: o.PoolTimeout, | |||
| IdleTimeout: o.IdleTimeout, | |||
| IdleCheckFrequency: o.IdleCheckFrequency, | |||
| TLSConfig: o.TLSConfig, | |||
| } | |||
| } | |||
| // -------------------------------------------------------------------- | |||
| // UniversalClient is an abstract client which, based on the provided options, | |||
| // can connect to a cluster, a sentinel-backed failover instance, or a simple | |||
| // single-instance server. This can be useful for testing cluster-specific | |||
| // applications locally. | |||
| type UniversalClient interface { | |||
| Cmdable | |||
| Watch(fn func(*Tx) error, keys ...string) error | |||
| Process(cmd Cmder) error | |||
| WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) | |||
| Subscribe(channels ...string) *PubSub | |||
| PSubscribe(channels ...string) *PubSub | |||
| Close() error | |||
| } | |||
| var _ UniversalClient = (*Client)(nil) | |||
| var _ UniversalClient = (*ClusterClient)(nil) | |||
| // NewUniversalClient returns a new multi client. The type of the returned | |||
| // client depends on the following three conditions: | |||
| // | |||
| // 1. If a MasterName is passed, a sentinel-backed FailoverClient is returned. | |||
| // 2. If the number of Addrs is two or more, a ClusterClient is returned. | |||
| // 3. Otherwise, a single-node redis Client is returned. | |||
| func NewUniversalClient(opts *UniversalOptions) UniversalClient { | |||
| if opts.MasterName != "" { | |||
| return NewFailoverClient(opts.failover()) | |||
| } else if len(opts.Addrs) > 1 { | |||
| return NewClusterClient(opts.cluster()) | |||
| } | |||
| return NewClient(opts.simple()) | |||
| } | |||
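| // A usage sketch: the same call site yields different client kinds purely | |||
| // from the options. Addresses and the master name are placeholders. | |||
| // | |||
| // // Sentinel-backed failover client: | |||
| // c := NewUniversalClient(&UniversalOptions{ | |||
| //     Addrs:      []string{"127.0.0.1:26379"}, | |||
| //     MasterName: "mymaster", | |||
| // }) | |||
| // | |||
| // // Single-node client: | |||
| // c = NewUniversalClient(&UniversalOptions{Addrs: []string{"127.0.0.1:6379"}}) | |||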
| @@ -143,6 +143,14 @@ github.com/go-macaron/session/postgres | |||
| github.com/go-macaron/session/redis | |||
| # github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90 | |||
| github.com/go-macaron/toolbox | |||
| # github.com/go-redis/redis v6.15.2+incompatible | |||
| github.com/go-redis/redis | |||
| github.com/go-redis/redis/internal | |||
| github.com/go-redis/redis/internal/consistenthash | |||
| github.com/go-redis/redis/internal/hashtag | |||
| github.com/go-redis/redis/internal/pool | |||
| github.com/go-redis/redis/internal/proto | |||
| github.com/go-redis/redis/internal/util | |||
| # github.com/go-sql-driver/mysql v1.4.0 => github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f | |||
| github.com/go-sql-driver/mysql | |||
| # github.com/go-xorm/builder v0.3.3 | |||