diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..1af2323fe9
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+doctests/* @dmaier-redislabs
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000000..707670d0fa
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1 @@
+custom: ['https://uptrace.dev/sponsor']
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..3f934f8f60
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,49 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+---
+
+Issue tracker is used for reporting bugs and discussing new features. Please use
+[stackoverflow](https://stackoverflow.com) for supporting issues.
+
+
+
+## Expected Behavior
+
+
+
+## Current Behavior
+
+
+
+## Possible Solution
+
+
+
+## Steps to Reproduce
+
+
+
+
+1.
+2.
+3.
+4.
+
+## Context (Environment)
+
+
+
+
+
+
+## Detailed Description
+
+
+
+## Possible Implementation
+
+
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..e86d7a6672
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Discussions
+ url: https://github.com/go-redis/redis/discussions
+ about: Ask a question via GitHub Discussions
diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml
new file mode 100644
index 0000000000..b8e576e792
--- /dev/null
+++ b/.github/actions/run-tests/action.yml
@@ -0,0 +1,54 @@
+name: 'Run go-redis tests'
+description: 'Runs go-redis tests against different Redis versions and configurations'
+inputs:
+ go-version:
+ description: 'Go version to use for running tests'
+ default: '1.23'
+ redis-version:
+ description: 'Redis version to test against'
+ required: true
+runs:
+ using: "composite"
+ steps:
+ - name: Set up ${{ inputs.go-version }}
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ inputs.go-version }}
+
+ - name: Setup Test environment
+ env:
+ REDIS_VERSION: ${{ inputs.redis-version }}
+ CLIENT_LIBS_TEST_IMAGE: "redislabs/client-libs-test:${{ inputs.redis-version }}"
+ run: |
+ set -e
+ redis_version_np=$(echo "$REDIS_VERSION" | grep -oP '^\d+\.\d+')
+
+ # Mapping of redis version to redis testing containers
+ declare -A redis_version_mapping=(
+ ["8.2.x"]="8.2.1-pre"
+ ["8.0.x"]="8.0.2"
+ ["7.4.x"]="rs-7.4.0-v5"
+ ["7.2.x"]="rs-7.2.0-v17"
+ )
+
+ if [[ -v redis_version_mapping[$REDIS_VERSION] ]]; then
+ echo "REDIS_VERSION=${redis_version_np}" >> $GITHUB_ENV
+ echo "REDIS_IMAGE=redis:${{ inputs.redis-version }}" >> $GITHUB_ENV
+ echo "CLIENT_LIBS_TEST_IMAGE=redislabs/client-libs-test:${redis_version_mapping[$REDIS_VERSION]}" >> $GITHUB_ENV
+ else
+ echo "Version not found in the mapping."
+ exit 1
+ fi
+ sleep 10 # wait for redis to start
+ shell: bash
+ - name: Set up Docker Compose environment with redis ${{ inputs.redis-version }}
+ run: |
+ make docker.start
+ shell: bash
+ - name: Run tests
+ env:
+ RCE_DOCKER: "true"
+ RE_CLUSTER: "false"
+ run: |
+ make test.ci
+ shell: bash
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..77b7be590e
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+- package-ecosystem: gomod
+ directory: /
+ schedule:
+ interval: weekly
+- package-ecosystem: github-actions
+ directory: /
+ schedule:
+ interval: weekly
diff --git a/.github/release-drafter-config.yml b/.github/release-drafter-config.yml
new file mode 100644
index 0000000000..c961f5972a
--- /dev/null
+++ b/.github/release-drafter-config.yml
@@ -0,0 +1,50 @@
+name-template: '$NEXT_MINOR_VERSION'
+tag-template: 'v$NEXT_MINOR_VERSION'
+autolabeler:
+ - label: 'maintenance'
+ files:
+ - '*.md'
+ - '.github/*'
+ - label: 'bug'
+ branch:
+ - '/bug-.+'
+ - label: 'maintenance'
+ branch:
+ - '/maintenance-.+'
+ - label: 'feature'
+ branch:
+ - '/feature-.+'
+categories:
+ - title: 'Breaking Changes'
+ labels:
+ - 'breakingchange'
+ - title: '๐งช Experimental Features'
+ labels:
+ - 'experimental'
+ - title: '๐ New Features'
+ labels:
+ - 'feature'
+ - 'enhancement'
+ - title: '๐ Bug Fixes'
+ labels:
+ - 'fix'
+ - 'bugfix'
+ - 'bug'
+ - 'BUG'
+ - title: '๐งฐ Maintenance'
+ label: 'maintenance'
+change-template: '- $TITLE (#$NUMBER)'
+exclude-labels:
+ - 'skip-changelog'
+exclude-contributors:
+ - 'dependabot'
+template: |
+ # Changes
+
+ $CHANGES
+
+ ## Contributors
+ We'd like to thank all the contributors who worked on this release!
+
+ $CONTRIBUTORS
+
diff --git a/.github/spellcheck-settings.yml b/.github/spellcheck-settings.yml
new file mode 100644
index 0000000000..b8ca6cca6f
--- /dev/null
+++ b/.github/spellcheck-settings.yml
@@ -0,0 +1,29 @@
+matrix:
+- name: Markdown
+ expect_match: false
+ aspell:
+ lang: en
+ d: en_US
+ ignore-case: true
+ dictionary:
+ wordlists:
+ - .github/wordlist.txt
+ output: wordlist.dic
+ pipeline:
+ - pyspelling.filters.markdown:
+ markdown_extensions:
+ - markdown.extensions.extra:
+ - pyspelling.filters.html:
+ comments: false
+ attributes:
+ - alt
+ ignores:
+ - ':matches(code, pre)'
+ - code
+ - pre
+ - blockquote
+ - img
+ sources:
+ - 'README.md'
+ - 'FAQ.md'
+ - 'docs/**'
diff --git a/.github/wordlist.txt b/.github/wordlist.txt
new file mode 100644
index 0000000000..741c51aa03
--- /dev/null
+++ b/.github/wordlist.txt
@@ -0,0 +1,78 @@
+ACLs
+APIs
+autoload
+autoloader
+autoloading
+analytics
+Autoloading
+backend
+backends
+behaviour
+CAS
+ClickHouse
+config
+customizable
+Customizable
+dataset
+de
+DisableIdentity
+ElastiCache
+extensibility
+FPM
+Golang
+IANA
+keyspace
+keyspaces
+Kvrocks
+localhost
+Lua
+MSSQL
+namespace
+NoSQL
+OpenTelemetry
+ORM
+Packagist
+PhpRedis
+pipelining
+pluggable
+Predis
+PSR
+Quickstart
+README
+rebalanced
+rebalancing
+redis
+Redis
+RocksDB
+runtime
+SHA
+sharding
+SETNAME
+SpellCheck
+SSL
+struct
+stunnel
+SynDump
+TCP
+TLS
+UnstableResp
+uri
+URI
+url
+variadic
+RedisStack
+RedisGears
+RedisTimeseries
+RediSearch
+RawResult
+RawVal
+entra
+EntraID
+Entra
+OAuth
+Azure
+StreamingCredentialsProvider
+oauth
+entraid
+MiB
+KiB
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000000..075d603a4b
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,100 @@
+name: Go
+
+on:
+ push:
+ branches: [master, v9, v9.7, v9.8, 'ndyakov/*', 'ofekshenawa/*', 'htemelski-redis/*', 'ce/*']
+ pull_request:
+ branches: [master, v9, v9.7, v9.8, 'ndyakov/*', 'ofekshenawa/*', 'htemelski-redis/*', 'ce/*']
+
+permissions:
+ contents: read
+
+jobs:
+
+ benchmark:
+ name: benchmark
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ redis-version:
+ - "8.2.x" # Redis CE 8.2
+ - "8.0.x" # Redis CE 8.0
+ - "7.4.x" # Redis stack 7.4
+ go-version:
+ - "1.23.x"
+ - "1.24.x"
+
+ steps:
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v6
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Checkout code
+ uses: actions/checkout@v5
+
+ - name: Setup Test environment
+ env:
+ REDIS_VERSION: ${{ matrix.redis-version }}
+ CLIENT_LIBS_TEST_IMAGE: "redislabs/client-libs-test:${{ matrix.redis-version }}"
+ run: |
+ set -e
+ redis_version_np=$(echo "$REDIS_VERSION" | grep -oP '^\d+\.\d+')
+
+ # Mapping of redis version to redis testing containers
+ declare -A redis_version_mapping=(
+ ["8.2.x"]="8.2.1-pre"
+ ["8.0.x"]="8.0.2"
+ ["7.4.x"]="rs-7.4.0-v5"
+ )
+ if [[ -v redis_version_mapping[$REDIS_VERSION] ]]; then
+ echo "REDIS_VERSION=${redis_version_np}" >> $GITHUB_ENV
+ echo "REDIS_IMAGE=redis:${{ matrix.redis-version }}" >> $GITHUB_ENV
+ echo "CLIENT_LIBS_TEST_IMAGE=redislabs/client-libs-test:${redis_version_mapping[$REDIS_VERSION]}" >> $GITHUB_ENV
+ else
+ echo "Version not found in the mapping."
+ exit 1
+ fi
+ shell: bash
+ - name: Set up Docker Compose environment with redis ${{ matrix.redis-version }}
+ run: make docker.start
+ shell: bash
+ - name: Benchmark Tests
+ env:
+ RCE_DOCKER: "true"
+ RE_CLUSTER: "false"
+ run: make bench
+ shell: bash
+
+ test-redis-ce:
+ name: test-redis-ce
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ redis-version:
+ - "8.2.x" # Redis CE 8.2
+ - "8.0.x" # Redis CE 8.0
+ - "7.4.x" # Redis stack 7.4
+ - "7.2.x" # Redis stack 7.2
+ go-version:
+ - "1.23.x"
+ - "1.24.x"
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+
+ - name: Run tests
+ uses: ./.github/actions/run-tests
+ with:
+ go-version: ${{matrix.go-version}}
+ redis-version: ${{ matrix.redis-version }}
+
+ - name: Upload to Codecov
+ uses: codecov/codecov-action@v5
+ with:
+ files: coverage.txt
+ token: ${{ secrets.CODECOV_TOKEN }}
+
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000000..0a62809eea
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,67 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [master, v9, v9.7, v9.8]
+ pull_request:
+ branches: [master, v9, v9.7, v9.8]
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v5
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v3
+
+ # โน๏ธ Command-line programs to run using the OS shell.
+ # ๐ https://git.io/JvXDl
+
+ # โ๏ธ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
diff --git a/.github/workflows/doctests.yaml b/.github/workflows/doctests.yaml
new file mode 100644
index 0000000000..1afd0d8033
--- /dev/null
+++ b/.github/workflows/doctests.yaml
@@ -0,0 +1,43 @@
+name: Documentation Tests
+
+on:
+ push:
+ branches: [master, examples]
+ pull_request:
+ branches: [master, examples]
+
+permissions:
+ contents: read
+
+jobs:
+ doctests:
+ name: doctests
+ runs-on: ubuntu-latest
+
+ services:
+ redis-stack:
+ image: redislabs/client-libs-test:8.0.2
+ env:
+ TLS_ENABLED: no
+ REDIS_CLUSTER: no
+ PORT: 6379
+ ports:
+ - 6379:6379
+
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: ["1.24"]
+
+ steps:
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v6
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Checkout code
+ uses: actions/checkout@v5
+
+ - name: Test doc examples
+ working-directory: ./doctests
+ run: make test
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000000..62552abf29
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,28 @@
+name: golangci-lint
+
+on:
+ push:
+ tags:
+ - v*
+ branches:
+ - master
+ - main
+ - v9
+ - v9.8
+ pull_request:
+
+permissions:
+ contents: read
+ pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v8.0.0
+ with:
+ verify: true
+
diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml
new file mode 100644
index 0000000000..6695abfe4b
--- /dev/null
+++ b/.github/workflows/release-drafter.yml
@@ -0,0 +1,24 @@
+name: Release Drafter
+
+on:
+ push:
+ # branches to consider in the event; optional, defaults to all
+ branches:
+ - master
+
+permissions: {}
+jobs:
+ update_release_draft:
+ permissions:
+ pull-requests: write # to add label to PR (release-drafter/release-drafter)
+ contents: write # to create a github release (release-drafter/release-drafter)
+
+ runs-on: ubuntu-latest
+ steps:
+ # Drafts your next Release notes as Pull Requests are merged into "master"
+ - uses: release-drafter/release-drafter@v6
+ with:
+ # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
+ config-name: release-drafter-config.yml
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
new file mode 100644
index 0000000000..1517c33947
--- /dev/null
+++ b/.github/workflows/spellcheck.yml
@@ -0,0 +1,14 @@
+name: spellcheck
+on:
+ pull_request:
+jobs:
+ check-spelling:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v5
+ - name: Check Spelling
+ uses: rojopolis/spellcheck-github-actions@0.52.0
+ with:
+ config_path: .github/spellcheck-settings.yml
+ task_name: Markdown
diff --git a/.github/workflows/stale-issues.yml b/.github/workflows/stale-issues.yml
new file mode 100644
index 0000000000..ba0fb5587e
--- /dev/null
+++ b/.github/workflows/stale-issues.yml
@@ -0,0 +1,95 @@
+name: "Stale Issue Management"
+on:
+ schedule:
+ # Run daily at midnight UTC
+ - cron: "0 0 * * *"
+ workflow_dispatch: # Allow manual triggering
+
+env:
+ # Default stale policy timeframes
+ DAYS_BEFORE_STALE: 365
+ DAYS_BEFORE_CLOSE: 30
+
+ # Accelerated timeline for needs-information issues
+ NEEDS_INFO_DAYS_BEFORE_STALE: 30
+ NEEDS_INFO_DAYS_BEFORE_CLOSE: 7
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ # First step: Handle regular issues (excluding needs-information)
+ - name: Mark regular issues as stale
+ uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+ # Default stale policy
+ days-before-stale: ${{ env.DAYS_BEFORE_STALE }}
+ days-before-close: ${{ env.DAYS_BEFORE_CLOSE }}
+
+ # Explicit stale label configuration
+ stale-issue-label: "stale"
+ stale-pr-label: "stale"
+
+ stale-issue-message: |
+ This issue has been automatically marked as stale due to inactivity.
+ It will be closed in 30 days if no further activity occurs.
+ If you believe this issue is still relevant, please add a comment to keep it open.
+
+ close-issue-message: |
+ This issue has been automatically closed due to inactivity.
+ If you believe this issue is still relevant, please reopen it or create a new issue with updated information.
+
+ # Exclude needs-information issues from this step
+ exempt-issue-labels: 'no-stale,needs-information'
+
+ # Remove stale label when issue/PR becomes active again
+ remove-stale-when-updated: true
+
+ # Apply to pull requests with same timeline
+ days-before-pr-stale: ${{ env.DAYS_BEFORE_STALE }}
+ days-before-pr-close: ${{ env.DAYS_BEFORE_CLOSE }}
+
+ stale-pr-message: |
+ This pull request has been automatically marked as stale due to inactivity.
+ It will be closed in 30 days if no further activity occurs.
+
+ close-pr-message: |
+ This pull request has been automatically closed due to inactivity.
+ If you would like to continue this work, please reopen the PR or create a new one.
+
+ # Only exclude no-stale PRs (needs-information PRs follow standard timeline)
+ exempt-pr-labels: 'no-stale'
+
+ # Second step: Handle needs-information issues with accelerated timeline
+ - name: Mark needs-information issues as stale
+ uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+ # Accelerated timeline for needs-information
+ days-before-stale: ${{ env.NEEDS_INFO_DAYS_BEFORE_STALE }}
+ days-before-close: ${{ env.NEEDS_INFO_DAYS_BEFORE_CLOSE }}
+
+ # Explicit stale label configuration
+ stale-issue-label: "stale"
+
+ # Only target ISSUES with needs-information label (not PRs)
+ only-issue-labels: 'needs-information'
+
+ stale-issue-message: |
+ This issue has been marked as stale because it requires additional information
+ that has not been provided for 30 days. It will be closed in 7 days if the
+ requested information is not provided.
+
+ close-issue-message: |
+ This issue has been closed because the requested information was not provided within the specified timeframe.
+ If you can provide the missing information, please reopen this issue or create a new one.
+
+ # Disable PR processing for this step
+ days-before-pr-stale: -1
+ days-before-pr-close: -1
+
+ # Remove stale label when issue becomes active again
+ remove-stale-when-updated: true
diff --git a/.github/workflows/test-redis-enterprise.yml b/.github/workflows/test-redis-enterprise.yml
new file mode 100644
index 0000000000..faf629025d
--- /dev/null
+++ b/.github/workflows/test-redis-enterprise.yml
@@ -0,0 +1,57 @@
+name: RE Tests
+
+on:
+ push:
+ branches: [master, v9, v9.7, v9.8]
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ name: build
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: [1.24.x]
+ re-build: ["7.4.2-54"]
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+
+ - name: Clone Redis EE docker repository
+ uses: actions/checkout@v5
+ with:
+ repository: RedisLabs/redis-ee-docker
+ path: redis-ee
+
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v6
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Build cluster
+ working-directory: redis-ee
+ env:
+ IMAGE: "redislabs/redis:${{ matrix.re-build }}"
+ RE_USERNAME: test@test.com
+ RE_PASS: 12345
+ RE_CLUSTER_NAME: re-test
+ RE_USE_OSS_CLUSTER: false
+ RE_DB_PORT: 6379
+ run: ./build.sh
+
+ - name: Test
+ env:
+ RE_CLUSTER: true
+ REDIS_VERSION: "7.4"
+ run: |
+ go test \
+ --ginkgo.skip-file="ring_test.go" \
+ --ginkgo.skip-file="sentinel_test.go" \
+ --ginkgo.skip-file="osscluster_test.go" \
+ --ginkgo.skip-file="pubsub_test.go" \
+ --ginkgo.label-filter='!NonRedisEnterprise'
diff --git a/.gitignore b/.gitignore
index ebfe903bcd..00710d5077 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,15 @@
*.rdb
-testdata/*/
+testdata/*
+.idea/
+.DS_Store
+*.tar.gz
+*.dic
+redis8tests.sh
+coverage.txt
+**/coverage.txt
+.vscode
+tmp/*
+*.test
+
+# maintenanceNotifications upgrade documentation (temporary)
+maintenanceNotifications/docs/
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000000..872454ff7f
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,34 @@
+version: "2"
+run:
+ timeout: 5m
+ tests: false
+linters:
+ settings:
+ staticcheck:
+ checks:
+ - all
+ # Incorrect or missing package comment.
+ # https://staticcheck.dev/docs/checks/#ST1000
+ - -ST1000
+ # Omit embedded fields from selector expression.
+ # https://staticcheck.dev/docs/checks/#QF1008
+ - -QF1008
+ - -ST1003
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/.prettierrc.yml b/.prettierrc.yml
new file mode 100644
index 0000000000..8b7f044ad1
--- /dev/null
+++ b/.prettierrc.yml
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index f49927ee84..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-sudo: false
-language: go
-
-services:
- - redis-server
-
-go:
- - 1.4.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - tip
-
-matrix:
- allow_failures:
- - go: 1.4.x
- - go: tip
-
-install:
- - go get github.com/onsi/ginkgo
- - go get github.com/onsi/gomega
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..8c68c522e5
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,118 @@
+# Contributing
+
+## Introduction
+
+We appreciate your interest in considering contributing to go-redis.
+Community contributions mean a lot to us.
+
+## Contributions we need
+
+You may already know how you'd like to contribute, whether it's a fix for a bug you
+encountered, or a new feature your team wants to use.
+
+If you don't know where to start: improving
+documentation, bug triaging, and writing tutorials are all examples of
+helpful contributions that mean less work for you.
+
+## Your First Contribution
+
+Unsure where to begin contributing? You can start by looking through
+[help-wanted
+issues](https://github.com/redis/go-redis/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted).
+
+Never contributed to open source before? Here are a couple of friendly
+tutorials:
+
+- [Make a Pull Request](http://makeapullrequest.com/)
+- [First Timers Only](http://www.firsttimersonly.com/)
+
+## Getting Started
+
+Here's how to get started with your code contribution:
+
+1. Create your own fork of go-redis
+2. Do the changes in your fork
+3. If you need a development environment, run `make docker.start`.
+
+> Note: this clones and builds the docker containers specified in `docker-compose.yml`, to understand more about
+> the infrastructure that will be started you can check the `docker-compose.yml`. You also have the possibility
+> to specify the redis image that will be pulled with the env variable `CLIENT_LIBS_TEST_IMAGE`.
+> By default the docker image that will be pulled and started is `redislabs/client-libs-test:8.2.1-pre`.
+> If you want to test with newer Redis version, using a newer version of `redislabs/client-libs-test` should work out of the box.
+
+4. While developing, make sure the tests pass by running `make test` (if you have the docker containers running, `make test.ci` may be sufficient).
+> Note: `make test` will try to start all containers, run the tests with `make test.ci` and then stop all containers.
+5. If you like the change and think the project could use it, send a
+ pull request
+
+To see what else is part of the automation, run `invoke -l`
+
+
+## Testing
+
+### Setting up Docker
+To run the tests, you need to have Docker installed and running. If you are using a host OS that does not support
+docker host networks out of the box (e.g. Windows, OSX), you need to set up a docker desktop and enable docker host networks.
+
+### Running tests
+Call `make test` to run all tests.
+
+Continuous Integration uses these same wrappers to run all of these
+tests against multiple versions of redis. Feel free to test your
+changes against all the go versions supported, as declared by the
+[build.yml](./.github/workflows/build.yml) file.
+
+### Troubleshooting
+
+If you get any errors when running `make test`, make sure
+that you are using supported versions of Docker and go.
+
+## How to Report a Bug
+
+### Security Vulnerabilities
+
+**NOTE**: If you find a security vulnerability, do NOT open an issue.
+Email [Redis Open Source](mailto:oss@redis.com) instead.
+
+In order to determine whether you are dealing with a security issue, ask
+yourself these two questions:
+
+- Can I access something that's not mine, or something I shouldn't
+ have access to?
+- Can I disable something for other people?
+
+If the answer to either of those two questions are *yes*, then you're
+probably dealing with a security issue. Note that even if you answer
+*no* to both questions, you may still be dealing with a security
+issue, so if you're unsure, just email [us](mailto:oss@redis.com).
+
+### Everything Else
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of go-redis are you using?
+2. What version of redis are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## Suggest a feature or enhancement
+
+If you'd like to contribute a new feature, make sure you check our
+issue list to see if someone has already proposed it. Work may already
+be underway on the feature you want or we may have rejected a
+feature like it already.
+
+If you don't see anything, open a new issue that describes the feature
+you would like and how it should work.
+
+## Code review process
+
+The core team regularly looks at pull requests. We will provide
+feedback as soon as possible. After receiving our feedback, please respond
+within two weeks. After that time, we may close your PR if it isn't
+showing any activity.
+
+## Support
+
+Maintainers can provide limited support to contributors on discord: https://discord.gg/W4txy5AeKM
diff --git a/LICENSE b/LICENSE
index 298bed9bea..f4967dbc5c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013 The github.com/go-redis/redis Authors.
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/Makefile b/Makefile
index 50fdc55a1a..0252a7e2a3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,19 +1,87 @@
-all: testdeps
- go test ./...
- go test ./... -short -race
- go vet
+GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+REDIS_VERSION ?= 8.2
+RE_CLUSTER ?= false
+RCE_DOCKER ?= true
+CLIENT_LIBS_TEST_IMAGE ?= redislabs/client-libs-test:8.2.1-pre
-testdeps: testdata/redis/src/redis-server
+docker.start:
+ export RE_CLUSTER=$(RE_CLUSTER) && \
+ export RCE_DOCKER=$(RCE_DOCKER) && \
+ export REDIS_VERSION=$(REDIS_VERSION) && \
+ export CLIENT_LIBS_TEST_IMAGE=$(CLIENT_LIBS_TEST_IMAGE) && \
+ docker compose --profile all up -d --quiet-pull
-bench: testdeps
- go test ./... -test.run=NONE -test.bench=. -test.benchmem
+docker.stop:
+ docker compose --profile all down
-.PHONY: all test testdeps bench
+test:
+ $(MAKE) docker.start
+ @if [ -z "$(REDIS_VERSION)" ]; then \
+ echo "REDIS_VERSION not set, running all tests"; \
+ $(MAKE) test.ci; \
+ else \
+ MAJOR_VERSION=$$(echo "$(REDIS_VERSION)" | cut -d. -f1); \
+ if [ "$$MAJOR_VERSION" -ge 8 ]; then \
+ echo "REDIS_VERSION $(REDIS_VERSION) >= 8, running all tests"; \
+ $(MAKE) test.ci; \
+ else \
+ echo "REDIS_VERSION $(REDIS_VERSION) < 8, skipping vector_sets tests"; \
+ $(MAKE) test.ci.skip-vectorsets; \
+ fi; \
+ fi
+ $(MAKE) docker.stop
-testdata/redis:
- mkdir -p $@
- wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@
+test.ci:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go test in $${dir}"; \
+ (cd "$${dir}" && \
+ export RE_CLUSTER=$(RE_CLUSTER) && \
+ export RCE_DOCKER=$(RCE_DOCKER) && \
+ export REDIS_VERSION=$(REDIS_VERSION) && \
+ go mod tidy -compat=1.18 && \
+ go vet && \
+ go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race -skip Example); \
+ done
+ cd internal/customvet && go build .
+ go vet -vettool ./internal/customvet/customvet
-testdata/redis/src/redis-server: testdata/redis
- sed -i 's/libjemalloc.a/libjemalloc.a -lrt/g' $ go-redis is the official Redis client library for the Go programming language. It offers a straightforward interface for interacting with Redis servers.
+
+## Supported versions
+
+In `go-redis` we are aiming to support the last three releases of Redis. Currently, this means we do support:
+- [Redis 7.2](https://raw.githubusercontent.com/redis/redis/7.2/00-RELEASENOTES) - using Redis Stack 7.2 for modules support
+- [Redis 7.4](https://raw.githubusercontent.com/redis/redis/7.4/00-RELEASENOTES) - using Redis Stack 7.4 for modules support
+- [Redis 8.0](https://raw.githubusercontent.com/redis/redis/8.0/00-RELEASENOTES) - using Redis CE 8.0 where modules are included
+- [Redis 8.2](https://raw.githubusercontent.com/redis/redis/8.2/00-RELEASENOTES) - using Redis CE 8.2 where modules are included
+
+Although the `go.mod` states it requires at minimum `go 1.18`, our CI is configured to run the tests against all supported
+versions of Redis and latest two versions of Go ([1.23](https://go.dev/doc/devel/release#go1.23.0),
+[1.24](https://go.dev/doc/devel/release#go1.24.0)). We observe that some module-related tests may not pass with
+Redis Stack 7.2 and some commands are changed with Redis CE 8.0.
+Please do refer to the documentation and the tests if you experience any issues. We do plan to update the go version
+in the `go.mod` to `go 1.24` in one of the next releases.
+
+## How do I Redis?
+
+[Learn for free at Redis University](https://university.redis.com/)
+
+[Build faster with the Redis Launchpad](https://launchpad.redis.com/)
+
+[Try the Redis Cloud](https://redis.com/try-free/)
+
+[Dive in developer tutorials](https://developer.redis.com/)
+
+[Join the Redis community](https://redis.com/community/)
+
+[Work at Redis](https://redis.com/company/careers/jobs/)
+
+## Documentation
+
+- [English](https://redis.uptrace.dev)
+- [็ฎไฝไธญๆ](https://redis.uptrace.dev/zh/)
+
+## Resources
+
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/W4txy5AeKM)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol.
+
+## Features
+
+- Redis commands except QUIT and SYNC.
+- Automatic connection pooling.
+- [StreamingCredentialsProvider (e.g. entra id, oauth)](#1-streaming-credentials-provider-highest-priority) (experimental)
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
+- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+- [Customizable read and write buffers size.](#custom-buffer-sizes)
## Installation
-Install:
+go-redis supports the last 2 Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
+module:
```shell
-go get -u github.com/go-redis/redis
+go mod init github.com/my/repo
```
-Import:
+Then install go-redis/**v9**:
-```go
-import "github.com/go-redis/redis"
+```shell
+go get github.com/redis/go-redis/v9
```
## Quickstart
```go
-func ExampleNewClient() {
- client := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
+import (
+ "context"
+ "fmt"
- pong, err := client.Ping().Result()
- fmt.Println(pong, err)
- // Output: PONG
-}
+ "github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
func ExampleClient() {
- err := client.Set("key", "value", 0).Err()
- if err != nil {
- panic(err)
- }
-
- val, err := client.Get("key").Result()
- if err != nil {
- panic(err)
- }
- fmt.Println("key", val)
-
- val2, err := client.Get("key2").Result()
- if err == redis.Nil {
- fmt.Println("key2 does not exists")
- } else if err != nil {
- panic(err)
- } else {
- fmt.Println("key2", val2)
- }
- // Output: key value
- // key2 does not exists
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
}
```
-## Howto
+### Authentication
-Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package.
+The Redis client supports multiple ways to provide authentication credentials, with a clear priority order. Here are the available options:
-## Look and feel
+#### 1. Streaming Credentials Provider (Highest Priority) - Experimental feature
-Some corner cases:
+The streaming credentials provider allows for dynamic credential updates during the connection lifetime. This is particularly useful for managed identity services and token-based authentication.
- SET key value EX 10 NX
- set, err := client.SetNX("key", "value", 10*time.Second).Result()
+```go
+type StreamingCredentialsProvider interface {
+ Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error)
+}
- SORT list LIMIT 0 2 ASC
- vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+type CredentialsListener interface {
+ OnNext(credentials Credentials) // Called when credentials are updated
+ OnError(err error) // Called when an error occurs
+}
+
+type Credentials interface {
+ BasicAuth() (username string, password string)
+ RawCredentials() string
+}
+```
+
+Example usage:
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ StreamingCredentialsProvider: &MyCredentialsProvider{},
+})
+```
+
+**Note:** The streaming credentials provider can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) to enable Entra ID (formerly Azure AD) authentication. This allows for seamless integration with Azure's managed identity services and token-based authentication.
- ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
- vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
- }).Result()
+Example with Entra ID:
+```go
+import (
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis-entraid"
+)
- ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
- vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
+// Create an Entra ID credentials provider
+provider := entraid.NewDefaultAzureIdentityProvider()
- EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
- vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+// Configure Redis client with Entra ID authentication
+rdb := redis.NewClient(&redis.Options{
+ Addr: "your-redis-server.redis.cache.windows.net:6380",
+ StreamingCredentialsProvider: provider,
+ TLSConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+})
+```
-## Benchmark
+#### 2. Context-based Credentials Provider
-go-redis vs redigo:
+The context-based provider allows credentials to be determined at the time of each operation, using the context.
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ CredentialsProviderContext: func(ctx context.Context) (string, string, error) {
+ // Return username, password, and any error
+ return "user", "pass", nil
+ },
+})
```
-BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op
-BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op
-BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op
-BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op
-BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op
-BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op
-BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op
-BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op
-BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op
-BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op
-BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op
-BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op
-BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op
-BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op
-BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op
-BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op
+
+#### 3. Regular Credentials Provider
+
+A simple function-based provider that returns static credentials.
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ CredentialsProvider: func() (string, string) {
+ // Return username and password
+ return "user", "pass"
+ },
+})
```
-Redis Cluster:
+#### 4. Username/Password Fields (Lowest Priority)
+
+The most basic way to provide credentials is through the `Username` and `Password` fields in the options.
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Username: "user",
+ Password: "pass",
+})
```
-BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op
-BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op
+
+#### Priority Order
+
+The client will use credentials in the following priority order:
+1. Streaming Credentials Provider (if set)
+2. Context-based Credentials Provider (if set)
+3. Regular Credentials Provider (if set)
+4. Username/Password fields (if set)
+
+If none of these are set, the client will attempt to connect without authentication.
+
+### Protocol Version
+
+The client supports both RESP2 and RESP3 protocols. You can specify the protocol version in the options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+})
+```
+
+### Connecting via a redis url
+
+go-redis also supports connecting via the
+[redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt).
+The example below demonstrates how the connection can easily be configured using a string, adhering
+to this specification.
+
+```go
+import (
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient() *redis.Client {
+ url := "redis://user:password@localhost:6379/0?protocol=3"
+ opts, err := redis.ParseURL(url)
+ if err != nil {
+ panic(err)
+ }
+
+ return redis.NewClient(opts)
+}
+
+```
+
+### Instrument with OpenTelemetry
+
+```go
+import (
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/extra/redisotel/v9"
+ "errors"
+)
+
+func main() {
+ ...
+ rdb := redis.NewClient(&redis.Options{...})
+
+ if err := errors.Join(redisotel.InstrumentTracing(rdb), redisotel.InstrumentMetrics(rdb)); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+
+### Buffer Size Configuration
+
+go-redis uses 32KiB read and write buffers by default for optimal performance. For high-throughput applications or large pipelines, you can customize buffer sizes:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ ReadBufferSize: 1024 * 1024, // 1MiB read buffer
+ WriteBufferSize: 1024 * 1024, // 1MiB write buffer
+})
+```
+
+### Advanced Configuration
+
+go-redis supports extending the client identification phase to allow projects to send their own custom client identification.
+
+#### Default Client Identification
+
+By default, go-redis automatically sends the client library name and version during the connection process. This feature is available in redis-server as of version 7.2. As a result, the command is "fire and forget", meaning it fails silently if the Redis server does not support this feature.
+
+#### Disabling Identity Verification
+
+When connection identity verification is not required or needs to be explicitly disabled, a `DisableIdentity` configuration option exists.
+Initially there was a typo and the option was named `DisableIndentity` instead of `DisableIdentity`. The misspelled option is marked as deprecated and will be removed in V10 of this library.
+Although both options currently work, the correct one is `DisableIdentity`, so please use it to avoid any issues.
+
+To disable verification, set the `DisableIdentity` option to `true` in the Redis client options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "",
+ DB: 0,
+ DisableIdentity: true, // Disable set-info on connect
+})
+```
+
+#### Unstable RESP3 Structures for RediSearch Commands
+When integrating Redis with application functionalities using RESP3, it's important to note that some response structures aren't final yet. This is especially true for more complex structures like search and query results. We recommend using RESP2 when using the search and query capabilities, but we plan to stabilize the RESP3-based API-s in the coming versions. You can find more guidance in the upcoming release notes.
+
+To enable unstable RESP3, set the option in your client configuration:
+
+```go
+redis.NewClient(&redis.Options{
+ UnstableResp3: true,
+ })
+```
+**Note:** When UnstableResp3 mode is enabled, it's necessary to use RawResult() and RawVal() to retrieve the raw data.
+ Since the raw response is the only option for unstable search commands, Val() and Result() calls won't have any effect on them:
+
+```go
+res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawResult()
+val1 := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawVal()
+```
+
+#### Redis-Search Default Dialect
+
+In the Redis-Search module, **the default dialect is 2**. If needed, you can explicitly specify a different dialect using the appropriate configuration in your queries.
+
+**Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by passing the desired dialect in the arguments of the command you want to execute.
+For example:
+```
+ res2, err := rdb.FTSearchWithArgs(ctx,
+ "idx:bicycle",
+ "@pickup_zone:[CONTAINS $bike]",
+ &redis.FTSearchOptions{
+ Params: map[string]interface{}{
+ "bike": "POINT(-0.1278 51.5074)",
+ },
+ DialectVersion: 3,
+ },
+ ).Result()
+```
+You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/).
+
+#### Custom buffer sizes
+Prior to v9.12, the buffer size was the default go value of 4096 bytes. Starting from v9.12,
+go-redis uses 32KiB read and write buffers by default for optimal performance.
+For high-throughput applications or large pipelines, you can customize buffer sizes:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ ReadBufferSize: 1024 * 1024, // 1MiB read buffer
+ WriteBufferSize: 1024 * 1024, // 1MiB write buffer
+})
+```
+
+**Important**: If you experience any issues with the default buffer sizes, please try setting them to the go default of 4096 bytes.
+
+## Contributing
+We welcome contributions to the go-redis library! If you have a bug fix, feature request, or improvement, please open an issue or pull request on GitHub.
+We appreciate your help in making go-redis better for everyone.
+If you are interested in contributing to the go-redis library, please check out our [contributing guidelines](CONTRIBUTING.md) for more information on how to get started.
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+
+## Run the test
+
+Recommended to use Docker, just need to run:
+```shell
+make test
```
## See also
-- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
-- [Golang msgpack](https://github.com/vmihailenco/msgpack)
-- [Golang message task queue](https://github.com/go-msgqueue/msgqueue)
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+> The go-redis project was originally initiated by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
+> use it to monitor applications and set up automatic alerts to receive notifications via email,
+> Slack, Telegram, and others.
+>
+> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which
+> demonstrates how you can use Uptrace to monitor go-redis.
+
+Thanks to all the people who already contributed!
+
+
+
+
diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
new file mode 100644
index 0000000000..0f1112f8ac
--- /dev/null
+++ b/RELEASE-NOTES.md
@@ -0,0 +1,530 @@
+# Release Notes
+
+# 9.15.0-beta.3 (2025-09-26)
+
+## Highlights
+This beta release includes a pre-production version of processing push notifications and hitless upgrades.
+
+# Changes
+
+- chore: Update hash_commands.go ([#3523](https://github.com/redis/go-redis/pull/3523))
+
+## 🚀 New Features
+
+- feat: RESP3 notifications support & Hitless notifications handling ([#3418](https://github.com/redis/go-redis/pull/3418))
+
+## 🐛 Bug Fixes
+
+- fix: pipeline repeatedly sets the error ([#3525](https://github.com/redis/go-redis/pull/3525))
+
+## 🧰 Maintenance
+
+- chore(deps): bump rojopolis/spellcheck-github-actions from 0.51.0 to 0.52.0 ([#3520](https://github.com/redis/go-redis/pull/3520))
+- feat(e2e-testing): maintnotifications e2e and refactor ([#3526](https://github.com/redis/go-redis/pull/3526))
+- feat(tag.sh): Improved resiliency of the release process ([#3530](https://github.com/redis/go-redis/pull/3530))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@cxljs](https://github.com/cxljs), [@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), and [@omid-h70](https://github.com/omid-h70)
+
+
+# 9.15.0-beta.1 (2025-09-10)
+
+## Highlights
+This beta release includes a pre-production version of processing push notifications and hitless upgrades.
+
+### Hitless Upgrades
+Hitless upgrades is a major new feature that allows for zero-downtime upgrades in Redis clusters.
+You can find more information in the [Hitless Upgrades documentation](https://github.com/redis/go-redis/tree/master/hitless).
+
+# Changes
+
+## 🚀 New Features
+- [CAE-1088] & [CAE-1072] feat: RESP3 notifications support & Hitless notifications handling ([#3418](https://github.com/redis/go-redis/pull/3418))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@ndyakov](https://github.com/ndyakov), [@htemelski-redis](https://github.com/htemelski-redis), [@ofekshenawa](https://github.com/ofekshenawa)
+
+
+# 9.14.0 (2025-09-10)
+
+## Highlights
+- Added batch process method to the pipeline ([#3510](https://github.com/redis/go-redis/pull/3510))
+
+# Changes
+
+## 🚀 New Features
+
+- Added batch process method to the pipeline ([#3510](https://github.com/redis/go-redis/pull/3510))
+
+## 🐛 Bug Fixes
+
+- fix: SetErr on Cmd if the command cannot be queued correctly in multi/exec ([#3509](https://github.com/redis/go-redis/pull/3509))
+
+## 🧰 Maintenance
+
+- Updates release drafter config to exclude dependabot ([#3511](https://github.com/redis/go-redis/pull/3511))
+- chore(deps): bump actions/setup-go from 5 to 6 ([#3504](https://github.com/redis/go-redis/pull/3504))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis) and [@ndyakov](https://github.com/ndyakov)
+
+
+# 9.13.0 (2025-09-03)
+
+## Highlights
+- Pipeliner expose queued commands ([#3496](https://github.com/redis/go-redis/pull/3496))
+- Ensure that JSON.GET returns Nil response ([#3470](https://github.com/redis/go-redis/pull/3470))
+- Fixes on Read and Write buffer sizes and UniversalOptions
+
+## Changes
+- Pipeliner expose queued commands ([#3496](https://github.com/redis/go-redis/pull/3496))
+- fix(test): fix a timing issue in pubsub test ([#3498](https://github.com/redis/go-redis/pull/3498))
+- Allow users to enable read-write splitting in failover mode. ([#3482](https://github.com/redis/go-redis/pull/3482))
+- Set the read/write buffer size of the sentinel client to 4KiB ([#3476](https://github.com/redis/go-redis/pull/3476))
+
+## 🚀 New Features
+
+- fix(otel): register wait metrics ([#3499](https://github.com/redis/go-redis/pull/3499))
+- Support subscriptions against cluster slave nodes ([#3480](https://github.com/redis/go-redis/pull/3480))
+- Add wait metrics to otel ([#3493](https://github.com/redis/go-redis/pull/3493))
+- Clean failing timeout implementation ([#3472](https://github.com/redis/go-redis/pull/3472))
+
+## 🐛 Bug Fixes
+
+- Do not assume that all non-IP hosts are loopbacks ([#3085](https://github.com/redis/go-redis/pull/3085))
+- Ensure that JSON.GET returns Nil response ([#3470](https://github.com/redis/go-redis/pull/3470))
+
+## 🧰 Maintenance
+
+- fix(otel): register wait metrics ([#3499](https://github.com/redis/go-redis/pull/3499))
+- fix(make test): Add default env in makefile ([#3491](https://github.com/redis/go-redis/pull/3491))
+- Update the introduction to running tests in README.md ([#3495](https://github.com/redis/go-redis/pull/3495))
+- test: Add comprehensive edge case tests for IncrByFloat command ([#3477](https://github.com/redis/go-redis/pull/3477))
+- Set the default read/write buffer size of Redis connection to 32KiB ([#3483](https://github.com/redis/go-redis/pull/3483))
+- Bumps test image to 8.2.1-pre ([#3478](https://github.com/redis/go-redis/pull/3478))
+- fix UniversalOptions miss ReadBufferSize and WriteBufferSize options ([#3485](https://github.com/redis/go-redis/pull/3485))
+- chore(deps): bump actions/checkout from 4 to 5 ([#3484](https://github.com/redis/go-redis/pull/3484))
+- Removes dry run for stale issues policy ([#3471](https://github.com/redis/go-redis/pull/3471))
+- Update otel metrics URL ([#3474](https://github.com/redis/go-redis/pull/3474))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@LINKIWI](https://github.com/LINKIWI), [@cxljs](https://github.com/cxljs), [@cybersmeashish](https://github.com/cybersmeashish), [@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis), [@mwhooker](https://github.com/mwhooker), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@suever](https://github.com/suever)
+
+
+# 9.12.1 (2025-08-11)
+## 🚀 Highlights
+In the last version (9.12.0) the client introduced bigger write and read buffer sizes. The default value we set was 512KiB.
+However, users reported that this is too big for most use cases and can lead to high memory usage.
+In this version the default value is changed to 256KiB. The `README.md` was updated to reflect the
+correct default value and include a note that the default value can be changed.
+
+## 🐛 Bug Fixes
+
+- fix(options): Add buffer sizes to failover. Update README ([#3468](https://github.com/redis/go-redis/pull/3468))
+
+## 🧰 Maintenance
+
+- fix(options): Add buffer sizes to failover. Update README ([#3468](https://github.com/redis/go-redis/pull/3468))
+- chore: update & fix otel example ([#3466](https://github.com/redis/go-redis/pull/3466))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@ndyakov](https://github.com/ndyakov) and [@vmihailenco](https://github.com/vmihailenco)
+
+# 9.12.0 (2025-08-05)
+
+## 🚀 Highlights
+
+- This release includes support for [Redis 8.2](https://redis.io/docs/latest/operate/oss_and_stack/stack-with-enterprise/release-notes/redisce/redisos-8.2-release-notes/).
+- Introduces an experimental Query Builders for `FTSearch`, `FTAggregate` and other search commands.
+- Adds support for `EPSILON` option in `FT.VSIM`.
+- Includes bug fixes and improvements contributed by the community related to ring and [redisotel](https://github.com/redis/go-redis/tree/master/extra/redisotel).
+
+## Changes
+- Improve stale issue workflow ([#3458](https://github.com/redis/go-redis/pull/3458))
+- chore(ci): Add 8.2 rc2 pre build for CI ([#3459](https://github.com/redis/go-redis/pull/3459))
+- Added new stream commands ([#3450](https://github.com/redis/go-redis/pull/3450))
+- feat: Add "skip_verify" to Sentinel ([#3428](https://github.com/redis/go-redis/pull/3428))
+- fix: `errors.Join` requires Go 1.20 or later ([#3442](https://github.com/redis/go-redis/pull/3442))
+- DOC-4344 document quickstart examples ([#3426](https://github.com/redis/go-redis/pull/3426))
+- feat(bitop): add support for the new bitop operations ([#3409](https://github.com/redis/go-redis/pull/3409))
+
+## 🚀 New Features
+
+- feat: recover addIdleConn may occur panic ([#2445](https://github.com/redis/go-redis/pull/2445))
+- feat(ring): specify custom health check func via HeartbeatFn option ([#2940](https://github.com/redis/go-redis/pull/2940))
+- Add Query Builder for RediSearch commands ([#3436](https://github.com/redis/go-redis/pull/3436))
+- add configurable buffer sizes for Redis connections ([#3453](https://github.com/redis/go-redis/pull/3453))
+- Add VAMANA vector type to RediSearch ([#3449](https://github.com/redis/go-redis/pull/3449))
+- VSIM add `EPSILON` option ([#3454](https://github.com/redis/go-redis/pull/3454))
+- Add closing support to otel metrics instrumentation ([#3444](https://github.com/redis/go-redis/pull/3444))
+
+## 🐛 Bug Fixes
+
+- fix(redisotel): fix buggy append in reportPoolStats ([#3122](https://github.com/redis/go-redis/pull/3122))
+- fix(search): return results even if doc is empty ([#3457](https://github.com/redis/go-redis/pull/3457))
+- [ISSUE-3402]: Ring.Pipelined return dial timeout error ([#3403](https://github.com/redis/go-redis/pull/3403))
+
+## 🧰 Maintenance
+
+- Merges stale issues jobs into one job with two steps ([#3463](https://github.com/redis/go-redis/pull/3463))
+- improve code readability ([#3446](https://github.com/redis/go-redis/pull/3446))
+- chore(release): 9.12.0-beta.1 ([#3460](https://github.com/redis/go-redis/pull/3460))
+- DOC-5472 time series doc examples ([#3443](https://github.com/redis/go-redis/pull/3443))
+- Add VAMANA compression algorithm tests ([#3461](https://github.com/redis/go-redis/pull/3461))
+- bumped redis 8.2 version used in the CI/CD ([#3451](https://github.com/redis/go-redis/pull/3451))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@andy-stark-redis](https://github.com/andy-stark-redis), [@cxljs](https://github.com/cxljs), [@elena-kolevska](https://github.com/elena-kolevska), [@htemelski-redis](https://github.com/htemelski-redis), [@jouir](https://github.com/jouir), [@monkey92t](https://github.com/monkey92t), [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@rokn](https://github.com/rokn), [@smnvdev](https://github.com/smnvdev), [@strobil](https://github.com/strobil) and [@wzy9607](https://github.com/wzy9607)
+
+## New Contributors
+* [@htemelski-redis](https://github.com/htemelski-redis) made their first contribution in [#3409](https://github.com/redis/go-redis/pull/3409)
+* [@smnvdev](https://github.com/smnvdev) made their first contribution in [#3403](https://github.com/redis/go-redis/pull/3403)
+* [@rokn](https://github.com/rokn) made their first contribution in [#3444](https://github.com/redis/go-redis/pull/3444)
+
+# 9.11.0 (2025-06-24)
+
+## 🚀 Highlights
+
+Fixes TxPipeline to work correctly in cluster scenarios, allowing execution of commands
+only in the same slot.
+
+# Changes
+
+## 🚀 New Features
+
+- Set cluster slot for `scan` commands, rather than random ([#2623](https://github.com/redis/go-redis/pull/2623))
+- Add CredentialsProvider field to UniversalOptions ([#2927](https://github.com/redis/go-redis/pull/2927))
+- feat(redisotel): add WithCallerEnabled option ([#3415](https://github.com/redis/go-redis/pull/3415))
+
+## 🐛 Bug Fixes
+
+- fix(txpipeline): keyless commands should take the slot of the keyed ([#3411](https://github.com/redis/go-redis/pull/3411))
+- fix(loading): cache the loaded flag for slave nodes ([#3410](https://github.com/redis/go-redis/pull/3410))
+- fix(txpipeline): should return error on multi/exec on multiple slots ([#3408](https://github.com/redis/go-redis/pull/3408))
+- fix: check if the shard exists to avoid returning nil ([#3396](https://github.com/redis/go-redis/pull/3396))
+
+## 🧰 Maintenance
+
+- feat: optimize connection pool waitTurn ([#3412](https://github.com/redis/go-redis/pull/3412))
+- chore(ci): update CI redis builds ([#3407](https://github.com/redis/go-redis/pull/3407))
+- chore: remove a redundant method from `Ring`, `Client` and `ClusterClient` ([#3401](https://github.com/redis/go-redis/pull/3401))
+- test: refactor TestBasicCredentials using table-driven tests ([#3406](https://github.com/redis/go-redis/pull/3406))
+- perf: reduce unnecessary memory allocation operations ([#3399](https://github.com/redis/go-redis/pull/3399))
+- fix: insert entry during iterating over a map ([#3398](https://github.com/redis/go-redis/pull/3398))
+- DOC-5229 probabilistic data type examples ([#3413](https://github.com/redis/go-redis/pull/3413))
+- chore(deps): bump rojopolis/spellcheck-github-actions from 0.49.0 to 0.51.0 ([#3414](https://github.com/redis/go-redis/pull/3414))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@andy-stark-redis](https://github.com/andy-stark-redis), [@boekkooi-impossiblecloud](https://github.com/boekkooi-impossiblecloud), [@cxljs](https://github.com/cxljs), [@dcherubini](https://github.com/dcherubini), [@dependabot[bot]](https://github.com/apps/dependabot), [@iamamirsalehi](https://github.com/iamamirsalehi), [@ndyakov](https://github.com/ndyakov), [@pete-woods](https://github.com/pete-woods), [@twz915](https://github.com/twz915) and [dependabot[bot]](https://github.com/apps/dependabot)
+
+# 9.10.0 (2025-06-06)
+
+## 🚀 Highlights
+
+`go-redis` now supports [vector sets](https://redis.io/docs/latest/develop/data-types/vector-sets/). This data type is marked
+as "in preview" in Redis and its support in `go-redis` is marked as experimental. You can find examples in the documentation and
+in the `doctests` folder.
+
+# Changes
+
+## 🚀 New Features
+
+- feat: support vectorset ([#3375](https://github.com/redis/go-redis/pull/3375))
+
+## 🧰 Maintenance
+
+- Add the missing NewFloatSliceResult for testing ([#3393](https://github.com/redis/go-redis/pull/3393))
+- DOC-5078 vector set examples ([#3394](https://github.com/redis/go-redis/pull/3394))
+
+## Contributors
+We'd like to thank all the contributors who worked on this release!
+
+[@AndBobsYourUncle](https://github.com/AndBobsYourUncle), [@andy-stark-redis](https://github.com/andy-stark-redis), [@fukua95](https://github.com/fukua95) and [@ndyakov](https://github.com/ndyakov)
+
+
+
+# 9.9.0 (2025-05-27)
+
+## 🚀 Highlights
+- **Token-based Authentication**: Added `StreamingCredentialsProvider` for dynamic credential updates (experimental)
+ - Can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) for Azure AD authentication
+- **Connection Statistics**: Added connection waiting statistics for better monitoring
+- **Failover Improvements**: Added `ParseFailoverURL` for easier failover configuration
+- **Ring Client Enhancements**: Added shard access methods for better Pub/Sub management
+
+## ✨ New Features
+- Added `StreamingCredentialsProvider` for token-based authentication ([#3320](https://github.com/redis/go-redis/pull/3320))
+ - Supports dynamic credential updates
+ - Includes connection close hooks
+ - Note: Currently marked as experimental
+- Added `ParseFailoverURL` for parsing failover URLs ([#3362](https://github.com/redis/go-redis/pull/3362))
+- Added connection waiting statistics ([#2804](https://github.com/redis/go-redis/pull/2804))
+- Added new utility functions:
+ - `ParseFloat` and `MustParseFloat` in public utils package ([#3371](https://github.com/redis/go-redis/pull/3371))
+ - Unit tests for `Atoi`, `ParseInt`, `ParseUint`, and `ParseFloat` ([#3377](https://github.com/redis/go-redis/pull/3377))
+- Added Ring client shard access methods:
+ - `GetShardClients()` to retrieve all active shard clients
+ - `GetShardClientForKey(key string)` to get the shard client for a specific key ([#3388](https://github.com/redis/go-redis/pull/3388))
+
+## 🐛 Bug Fixes
+- Fixed routing reads to loading slave nodes ([#3370](https://github.com/redis/go-redis/pull/3370))
+- Added support for nil lag in XINFO GROUPS ([#3369](https://github.com/redis/go-redis/pull/3369))
+- Fixed pool acquisition timeout issues ([#3381](https://github.com/redis/go-redis/pull/3381))
+- Optimized unnecessary copy operations ([#3376](https://github.com/redis/go-redis/pull/3376))
+
+## 📚 Documentation
+- Updated documentation for XINFO GROUPS with nil lag support ([#3369](https://github.com/redis/go-redis/pull/3369))
+- Added package-level comments for new features
+
+## ⚡ Performance and Reliability
+- Optimized `ReplaceSpaces` function ([#3383](https://github.com/redis/go-redis/pull/3383))
+- Set default value for `Options.Protocol` in `init()` ([#3387](https://github.com/redis/go-redis/pull/3387))
+- Exported pool errors for public consumption ([#3380](https://github.com/redis/go-redis/pull/3380))
+
+## 🔧 Dependencies and Infrastructure
+- Updated Redis CI to version 8.0.1 ([#3372](https://github.com/redis/go-redis/pull/3372))
+- Updated spellcheck GitHub Actions ([#3389](https://github.com/redis/go-redis/pull/3389))
+- Removed unused parameters ([#3382](https://github.com/redis/go-redis/pull/3382), [#3384](https://github.com/redis/go-redis/pull/3384))
+
+## 🧪 Testing
+- Added unit tests for pool acquisition timeout ([#3381](https://github.com/redis/go-redis/pull/3381))
+- Added unit tests for utility functions ([#3377](https://github.com/redis/go-redis/pull/3377))
+
+## 👥 Contributors
+
+We would like to thank all the contributors who made this release possible:
+
+[@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@LINKIWI](https://github.com/LINKIWI), [@iamamirsalehi](https://github.com/iamamirsalehi), [@fukua95](https://github.com/fukua95), [@lzakharov](https://github.com/lzakharov), [@DengY11](https://github.com/DengY11)
+
+## 📝 Changelog
+
+For a complete list of changes, see the [full changelog](https://github.com/redis/go-redis/compare/v9.8.0...v9.9.0).
+
+# 9.8.0 (2025-04-30)
+
+## 🚀 Highlights
+- **Redis 8 Support**: Full compatibility with Redis 8.0, including testing and CI integration
+- **Enhanced Hash Operations**: Added support for new hash commands (`HGETDEL`, `HGETEX`, `HSETEX`) and `HSTRLEN` command
+- **Search Improvements**: Enabled Search DIALECT 2 by default and added `CountOnly` argument for `FT.Search`
+
+## ✨ New Features
+- Added support for new hash commands: `HGETDEL`, `HGETEX`, `HSETEX` ([#3305](https://github.com/redis/go-redis/pull/3305))
+- Added `HSTRLEN` command for hash operations ([#2843](https://github.com/redis/go-redis/pull/2843))
+- Added `Do` method for raw query by single connection from `pool.Conn()` ([#3182](https://github.com/redis/go-redis/pull/3182))
+- Prevent false-positive marshaling by treating zero time.Time as empty in isEmptyValue ([#3273](https://github.com/redis/go-redis/pull/3273))
+- Added FailoverClusterClient support for Universal client ([#2794](https://github.com/redis/go-redis/pull/2794))
+- Added support for cluster mode with `IsClusterMode` config parameter ([#3255](https://github.com/redis/go-redis/pull/3255))
+- Added client name support in `HELLO` RESP handshake ([#3294](https://github.com/redis/go-redis/pull/3294))
+- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213))
+- Added read-only option for failover configurations ([#3281](https://github.com/redis/go-redis/pull/3281))
+- Added `CountOnly` argument for `FT.Search` to use `LIMIT 0 0` ([#3338](https://github.com/redis/go-redis/pull/3338))
+- Added `DB` option support in `NewFailoverClusterClient` ([#3342](https://github.com/redis/go-redis/pull/3342))
+- Added `nil` check for the options when creating a client ([#3363](https://github.com/redis/go-redis/pull/3363))
+
+## 🐛 Bug Fixes
+- Fixed `PubSub` concurrency safety issues ([#3360](https://github.com/redis/go-redis/pull/3360))
+- Fixed panic caused when argument is `nil` ([#3353](https://github.com/redis/go-redis/pull/3353))
+- Improved error handling when fetching master node from sentinels ([#3349](https://github.com/redis/go-redis/pull/3349))
+- Fixed connection pool timeout issues and increased retries ([#3298](https://github.com/redis/go-redis/pull/3298))
+- Fixed context cancellation error leading to connection spikes on Primary instances ([#3190](https://github.com/redis/go-redis/pull/3190))
+- Fixed RedisCluster client to consider `MASTERDOWN` a retriable error ([#3164](https://github.com/redis/go-redis/pull/3164))
+- Fixed tracing to show complete commands instead of truncated versions ([#3290](https://github.com/redis/go-redis/pull/3290))
+- Fixed OpenTelemetry instrumentation to prevent multiple span reporting ([#3168](https://github.com/redis/go-redis/pull/3168))
+- Fixed `FT.Search` Limit argument and added `CountOnly` argument for limit 0 0 ([#3338](https://github.com/redis/go-redis/pull/3338))
+- Fixed missing command in interface ([#3344](https://github.com/redis/go-redis/pull/3344))
+- Fixed slot calculation for `COUNTKEYSINSLOT` command ([#3327](https://github.com/redis/go-redis/pull/3327))
+- Updated PubSub implementation with correct context ([#3329](https://github.com/redis/go-redis/pull/3329))
+
+## 📚 Documentation
+- Added hash search examples ([#3357](https://github.com/redis/go-redis/pull/3357))
+- Fixed documentation comments ([#3351](https://github.com/redis/go-redis/pull/3351))
+- Added `CountOnly` search example ([#3345](https://github.com/redis/go-redis/pull/3345))
+- Added examples for list commands: `LLEN`, `LPOP`, `LPUSH`, `LRANGE`, `RPOP`, `RPUSH` ([#3234](https://github.com/redis/go-redis/pull/3234))
+- Added `SADD` and `SMEMBERS` command examples ([#3242](https://github.com/redis/go-redis/pull/3242))
+- Updated `README.md` to use Redis Discord guild ([#3331](https://github.com/redis/go-redis/pull/3331))
+- Updated `HExpire` command documentation ([#3355](https://github.com/redis/go-redis/pull/3355))
+- Featured OpenTelemetry instrumentation more prominently ([#3316](https://github.com/redis/go-redis/pull/3316))
+- Updated `README.md` with additional information ([#310ce55](https://github.com/redis/go-redis/commit/310ce55))
+
+## ⚡ Performance and Reliability
+- Bound connection pool background dials to configured dial timeout ([#3089](https://github.com/redis/go-redis/pull/3089))
+- Ensured context isn't exhausted via concurrent query ([#3334](https://github.com/redis/go-redis/pull/3334))
+
+## 🔧 Dependencies and Infrastructure
+- Updated testing image to Redis 8.0-RC2 ([#3361](https://github.com/redis/go-redis/pull/3361))
+- Enabled CI for Redis CE 8.0 ([#3274](https://github.com/redis/go-redis/pull/3274))
+- Updated various dependencies:
+ - Bumped golangci/golangci-lint-action from 6.5.0 to 7.0.0 ([#3354](https://github.com/redis/go-redis/pull/3354))
+ - Bumped rojopolis/spellcheck-github-actions ([#3336](https://github.com/redis/go-redis/pull/3336))
+ - Bumped golang.org/x/net in example/otel ([#3308](https://github.com/redis/go-redis/pull/3308))
+- Migrated golangci-lint configuration to v2 format ([#3354](https://github.com/redis/go-redis/pull/3354))
+
+## ⚠️ Breaking Changes
+- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213))
+- Dropped RedisGears (Triggers and Functions) support ([#3321](https://github.com/redis/go-redis/pull/3321))
+- Dropped FT.PROFILE command that was never enabled ([#3323](https://github.com/redis/go-redis/pull/3323))
+
+## 🔒 Security
+- Fixed network error handling on SETINFO (CVE-2025-29923) ([#3295](https://github.com/redis/go-redis/pull/3295))
+
+## 🧪 Testing
+- Added integration tests for Redis 8 behavior changes in Redis Search ([#3337](https://github.com/redis/go-redis/pull/3337))
+- Added vector types INT8 and UINT8 tests ([#3299](https://github.com/redis/go-redis/pull/3299))
+- Added test codes for search_commands.go ([#3285](https://github.com/redis/go-redis/pull/3285))
+- Fixed example test sorting ([#3292](https://github.com/redis/go-redis/pull/3292))
+
+## 👥 Contributors
+
+We would like to thank all the contributors who made this release possible:
+
+[@alexander-menshchikov](https://github.com/alexander-menshchikov), [@EXPEbdodla](https://github.com/EXPEbdodla), [@afti](https://github.com/afti), [@dmaier-redislabs](https://github.com/dmaier-redislabs), [@four_leaf_clover](https://github.com/four_leaf_clover), [@alohaglenn](https://github.com/alohaglenn), [@gh73962](https://github.com/gh73962), [@justinmir](https://github.com/justinmir), [@LINKIWI](https://github.com/LINKIWI), [@liushuangbill](https://github.com/liushuangbill), [@golang88](https://github.com/golang88), [@gnpaone](https://github.com/gnpaone), [@ndyakov](https://github.com/ndyakov), [@nikolaydubina](https://github.com/nikolaydubina), [@oleglacto](https://github.com/oleglacto), [@andy-stark-redis](https://github.com/andy-stark-redis), [@rodneyosodo](https://github.com/rodneyosodo), [@dependabot](https://github.com/dependabot), [@rfyiamcool](https://github.com/rfyiamcool), [@frankxjkuang](https://github.com/frankxjkuang), [@fukua95](https://github.com/fukua95), [@soleymani-milad](https://github.com/soleymani-milad), [@ofekshenawa](https://github.com/ofekshenawa), [@khasanovbi](https://github.com/khasanovbi)
+
+
+# Old Changelog
+## Unreleased
+
+### Changed
+
+* `go-redis` won't skip span creation if the parent spans is not recording. ([#2980](https://github.com/redis/go-redis/issues/2980))
+ Users can use the OpenTelemetry sampler to control the sampling behavior.
+ For instance, you can use the `ParentBased(NeverSample())` sampler from `go.opentelemetry.io/otel/sdk/trace` to keep
+ a similar behavior (drop orphan spans) of `go-redis` as before.
+
+## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
+
+
+### Features
+
+* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
+* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
+* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
+
+
+
+## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
+
+
+### Bug Fixes
+
+* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
+
+
+### Features
+
+* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
+* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
+* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
+
+
+
+## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
+
+### New Features
+
+- feat(scan): scan time.Time sets the default decoding (#2413)
+- Add support for CLUSTER LINKS command (#2504)
+- Add support for acl dryrun command (#2502)
+- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
+- Add support for LCS Command (#2480)
+- Add support for BZMPOP (#2456)
+- Adding support for ZMPOP command (#2408)
+- Add support for LMPOP (#2440)
+- feat: remove pool unused fields (#2438)
+- Expiretime and PExpireTime (#2426)
+- Implement `FUNCTION` group of commands (#2475)
+- feat(zadd): add ZAddLT and ZAddGT (#2429)
+- Add: Support for COMMAND LIST command (#2491)
+- Add support for BLMPOP (#2442)
+- feat: check pipeline.Do to prevent confusion with Exec (#2517)
+- Function stats, function kill, fcall and fcall_ro (#2486)
+- feat: Add support for CLUSTER SHARDS command (#2507)
+- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
+
+### Fixed
+
+- fix: eval api cmd.SetFirstKeyPos (#2501)
+- fix: limit the number of connections created (#2441)
+- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479)
+- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
+- fix: group lag can be null (#2448)
+
+### Maintenance
+
+- Updating to the latest version of redis (#2508)
+- Allowing for running tests on a port other than the fixed 6380 (#2466)
+- redis 7.0.8 in tests (#2450)
+- docs: Update redisotel example for v9 (#2425)
+- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
+- chore: add Chinese translation (#2436)
+- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
+- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
+- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
+- docs: add instructions for the HSet api (#2503)
+- docs: add reading lag field comment (#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+ contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+ and deadlines. See
+ [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+ `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
+- Added `redis.HasErrorPrefix` to help working with errors.
+
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+ completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+ [example](example/otel) and
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+ an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+ it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+ reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usually, added support for more commands and fixed some bugs.
diff --git a/RELEASING.md b/RELEASING.md
new file mode 100644
index 0000000000..1115db4e3e
--- /dev/null
+++ b/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/acl_commands.go b/acl_commands.go
new file mode 100644
index 0000000000..9cb800bb3b
--- /dev/null
+++ b/acl_commands.go
@@ -0,0 +1,89 @@
+package redis
+
+import "context"
+
+// ACLCmdable groups the Redis ACL (Access Control List) commands supported
+// by the client. Each method maps one-to-one onto an ACL subcommand.
+type ACLCmdable interface {
+ ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+
+ ACLLog(ctx context.Context, count int64) *ACLLogCmd
+ ACLLogReset(ctx context.Context) *StatusCmd
+
+ ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd
+ ACLDelUser(ctx context.Context, username string) *IntCmd
+ ACLList(ctx context.Context) *StringSliceCmd
+
+ ACLCat(ctx context.Context) *StringSliceCmd
+ ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd
+}
+
+// ACLCatArgs holds the optional arguments accepted by ACLCatArgs.
+type ACLCatArgs struct {
+ // Category, when non-empty, restricts ACL CAT to a single category.
+ Category string
+}
+
+// ACLDryRun simulates execution of the given command by username without
+// actually running it (ACL DRYRUN). The reply reports whether the user's
+// ACL rules would permit the command.
+func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd {
+ // 3 fixed args ("acl", "dryrun", username) plus the command under test.
+ args := make([]interface{}, 0, 3+len(command))
+ args = append(args, "acl", "dryrun", username)
+ args = append(args, command...)
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLLog returns recent entries from the ACL security log (ACL LOG). A
+// count greater than zero limits the reply to that many entries; otherwise
+// no COUNT argument is sent and the server default applies.
+func (c cmdable) ACLLog(ctx context.Context, count int64) *ACLLogCmd {
+ args := make([]interface{}, 0, 3)
+ args = append(args, "acl", "log")
+ // COUNT is optional; only send it when the caller asked for a limit.
+ if count > 0 {
+ args = append(args, count)
+ }
+ cmd := NewACLLogCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLLogReset clears the ACL security log (ACL LOG RESET).
+func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "acl", "log", "reset")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLDelUser removes the named ACL user (ACL DELUSER) and returns the
+// number of users that were deleted.
+func (c cmdable) ACLDelUser(ctx context.Context, username string) *IntCmd {
+ cmd := NewIntCmd(ctx, "acl", "deluser", username)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLSetUser creates or modifies the named ACL user, applying the given
+// rules in order (ACL SETUSER). It returns a status reply.
+func (c cmdable) ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd {
+ // Build the argument list with append, matching the style of the other
+ // ACL command builders in this file.
+ args := make([]interface{}, 0, 3+len(rules))
+ args = append(args, "acl", "setuser", username)
+ for _, rule := range rules {
+ args = append(args, rule)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLList returns every configured ACL user as a rule string (ACL LIST).
+func (c cmdable) ACLList(ctx context.Context) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "acl", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLCat returns the list of ACL command categories (ACL CAT).
+func (c cmdable) ACLCat(ctx context.Context) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "acl", "cat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ACLCatArgs lists ACL categories, optionally filtered by one category
+// (ACL CAT [category]). With a nil options value or an empty Category it
+// behaves exactly like ACLCat.
+func (c cmdable) ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd {
+ // Without a category filter, delegate to the plain ACL CAT implementation.
+ if options == nil || options.Category == "" {
+ return c.ACLCat(ctx)
+ }
+
+ cmd := NewStringSliceCmd(ctx, "acl", "cat", options.Category)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/acl_commands_test.go b/acl_commands_test.go
new file mode 100644
index 0000000000..a96621dbce
--- /dev/null
+++ b/acl_commands_test.go
@@ -0,0 +1,449 @@
+package redis_test
+
+import (
+ "context"
+
+ "github.com/redis/go-redis/v9"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+// TestUserName is the ACL username created and torn down by the tests below.
+var TestUserName = "goredis"
+// Integration tests for ACL LOG and ACL LOG RESET against a live server.
+var _ = Describe("ACL", func() {
+ var client *redis.Client
+ var ctx context.Context
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ opt := redisOptions()
+ client = redis.NewClient(opt)
+ })
+
+ It("should ACL LOG", Label("NonRedisEnterprise"), func() {
+ // Start from an empty log, then create a user restricted to GET only.
+ Expect(client.ACLLogReset(ctx).Err()).NotTo(HaveOccurred())
+ err := client.Do(ctx, "acl", "setuser", "test", ">test", "on", "allkeys", "+get").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // These write commands are denied for the restricted user; the errors
+ // are deliberately ignored because the point is to populate the ACL log.
+ clientAcl := redis.NewClient(redisOptions())
+ clientAcl.Options().Username = "test"
+ clientAcl.Options().Password = "test"
+ clientAcl.Options().DB = 0
+ _ = clientAcl.Set(ctx, "mystring", "foo", 0).Err()
+ _ = clientAcl.HSet(ctx, "myhash", "foo", "bar").Err()
+ _ = clientAcl.SAdd(ctx, "myset", "foo", "bar").Err()
+
+ logEntries, err := client.ACLLog(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(logEntries)).To(Equal(4))
+
+ for _, entry := range logEntries {
+ Expect(entry.Reason).To(Equal("command"))
+ Expect(entry.Context).To(Equal("toplevel"))
+ Expect(entry.Object).NotTo(BeEmpty())
+ Expect(entry.Username).To(Equal("test"))
+ Expect(entry.AgeSeconds).To(BeNumerically(">=", 0))
+ Expect(entry.ClientInfo).NotTo(BeNil())
+ Expect(entry.EntryID).To(BeNumerically(">=", 0))
+ Expect(entry.TimestampCreated).To(BeNumerically(">=", 0))
+ Expect(entry.TimestampLastUpdated).To(BeNumerically(">=", 0))
+ }
+
+ // The count argument caps how many entries ACL LOG returns.
+ limitedLogEntries, err := client.ACLLog(ctx, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(limitedLogEntries)).To(Equal(2))
+
+ // cleanup after creating the user
+ err = client.Do(ctx, "acl", "deluser", "test").Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should ACL LOG RESET", Label("NonRedisEnterprise"), func() {
+ // Call ACL LOG RESET
+ resetCmd := client.ACLLogReset(ctx)
+ Expect(resetCmd.Err()).NotTo(HaveOccurred())
+ Expect(resetCmd.Val()).To(Equal("OK"))
+
+ // Verify that the log is empty after the reset
+ logEntries, err := client.ACLLog(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(logEntries)).To(Equal(0))
+ })
+
+})
+// Integration tests for user management: ACL LIST, ACL SETUSER,
+// ACL DELUSER and ACL DRYRUN.
+var _ = Describe("ACL user commands", Label("NonRedisEnterprise"), func() {
+ var client *redis.Client
+ var ctx context.Context
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ opt := redisOptions()
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ // Drop TestUserName (a no-op delete when a test did not create it)
+ // so the tests stay independent of each other.
+ _, err := client.ACLDelUser(context.Background(), TestUserName).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("list only default user", func() {
+ res, err := client.ACLList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(1))
+ Expect(res[0]).To(ContainSubstring("default"))
+ })
+
+ It("setuser and deluser", func() {
+ // Precondition: only the default user exists.
+ res, err := client.ACLList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(1))
+ Expect(res[0]).To(ContainSubstring("default"))
+
+ add, err := client.ACLSetUser(ctx, TestUserName, "nopass", "on", "allkeys", "+set", "+get").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ resAfter, err := client.ACLList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resAfter).To(HaveLen(2))
+ Expect(resAfter[1]).To(ContainSubstring(TestUserName))
+
+ deletedN, err := client.ACLDelUser(ctx, TestUserName).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(deletedN).To(BeNumerically("==", 1))
+
+ // After deletion the listing is back to its original single entry.
+ resAfterDeletion, err := client.ACLList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resAfterDeletion).To(HaveLen(1))
+ Expect(resAfterDeletion[0]).To(BeEquivalentTo(res[0]))
+ })
+
+ It("should acl dryrun", func() {
+ dryRun := client.ACLDryRun(ctx, "default", "get", "randomKey")
+ Expect(dryRun.Err()).NotTo(HaveOccurred())
+ Expect(dryRun.Val()).To(Equal("OK"))
+ })
+})
+
+// Integration tests that grant fine-grained permissions (per command and per
+// category, including module commands) to a throwaway user and verify that
+// allowed commands succeed while denied ones fail with NOPERM.
+var _ = Describe("ACL permissions", Label("NonRedisEnterprise"), func() {
+ var client *redis.Client
+ var ctx context.Context
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ opt := redisOptions()
+ // NOTE(review): UnstableResp3 is enabled here; presumably required by
+ // the module-command tests below — confirm.
+ opt.UnstableResp3 = true
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ // Remove the test user created by each spec and close the client.
+ _, err := client.ACLDelUser(context.Background(), TestUserName).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("reset permissions", func() {
+ // A freshly reset user has no command permissions at all.
+ add, err := client.ACLSetUser(ctx,
+ TestUserName,
+ "reset",
+ "nopass",
+ "on",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ connection := client.Conn()
+ authed, err := connection.AuthACL(ctx, TestUserName, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(authed).To(Equal("OK"))
+
+ _, err = connection.Get(ctx, "anykey").Result()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("add write permissions", func() {
+ // Grant SET on all keys, nothing else.
+ add, err := client.ACLSetUser(ctx,
+ TestUserName,
+ "reset",
+ "nopass",
+ "on",
+ "~*",
+ "+SET",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ connection := client.Conn()
+ authed, err := connection.AuthACL(ctx, TestUserName, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(authed).To(Equal("OK"))
+
+ // can write
+ v, err := connection.Set(ctx, "anykey", "anyvalue", 0).Result()
+ Expect(err).ToNot(HaveOccurred())
+ Expect(v).To(Equal("OK"))
+
+ // but can't read
+ value, err := connection.Get(ctx, "anykey").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(value).To(BeEmpty())
+ })
+
+ It("add read permissions", func() {
+ // Grant GET on all keys, nothing else. Relies on "anykey" having been
+ // written by the previous spec, so spec ordering matters here.
+ add, err := client.ACLSetUser(ctx,
+ TestUserName,
+ "reset",
+ "nopass",
+ "on",
+ "~*",
+ "+GET",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ connection := client.Conn()
+ authed, err := connection.AuthACL(ctx, TestUserName, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(authed).To(Equal("OK"))
+
+ // can read
+ value, err := connection.Get(ctx, "anykey").Result()
+ Expect(err).ToNot(HaveOccurred())
+ Expect(value).To(Equal("anyvalue"))
+
+ // but can't delete
+ del, err := connection.Del(ctx, "anykey").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(del).ToNot(Equal(1))
+ })
+
+ It("add del permissions", func() {
+ // Grant DEL on all keys, nothing else.
+ add, err := client.ACLSetUser(ctx,
+ TestUserName,
+ "reset",
+ "nopass",
+ "on",
+ "~*",
+ "+DEL",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ connection := client.Conn()
+ authed, err := connection.AuthACL(ctx, TestUserName, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(authed).To(Equal("OK"))
+
+ // can read
+ del, err := connection.Del(ctx, "anykey").Result()
+ Expect(err).ToNot(HaveOccurred())
+ Expect(del).To(BeEquivalentTo(1))
+ })
+
+ It("set permissions for module commands", func() {
+ SkipBeforeRedisVersion(8, "permissions for modules are supported for Redis Version >=8")
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(BeEquivalentTo("OK"))
+ WaitForIndexing(client, "txt")
+ client.HSet(ctx, "doc1", "txt", "foo baz")
+ client.HSet(ctx, "doc2", "txt", "foo bar")
+ // Mixed grant: some module commands allowed (+), some denied (-),
+ // so both code paths are exercised below.
+ add, err := client.ACLSetUser(ctx,
+ TestUserName,
+ "reset",
+ "nopass",
+ "on",
+ "~*",
+ "+FT.SEARCH",
+ "-FT.DROPINDEX",
+ "+json.set",
+ "+json.get",
+ "-json.clear",
+ "+bf.reserve",
+ "-bf.info",
+ "+cf.reserve",
+ "+cms.initbydim",
+ "+topk.reserve",
+ "+tdigest.create",
+ "+ts.create",
+ "-ts.info",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ c := client.Conn()
+ authed, err := c.AuthACL(ctx, TestUserName, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(authed).To(Equal("OK"))
+
+ // has perm for search
+ Expect(c.FTSearch(ctx, "txt", "foo ~bar").Err()).NotTo(HaveOccurred())
+
+ // no perm for dropindex
+ // NOTE(review): BeEmpty on an error value is unusual; HaveOccurred
+ // would be the conventional gomega matcher here and below.
+ err = c.FTDropIndex(ctx, "txt").Err()
+ Expect(err).ToNot(BeEmpty())
+ Expect(err.Error()).To(ContainSubstring("NOPERM"))
+
+ // json set and get have perm
+ Expect(c.JSONSet(ctx, "foo", "$", "\"bar\"").Err()).NotTo(HaveOccurred())
+ Expect(c.JSONGet(ctx, "foo", "$").Val()).To(BeEquivalentTo("[\"bar\"]"))
+
+ // no perm for json clear
+ err = c.JSONClear(ctx, "foo", "$").Err()
+ Expect(err).ToNot(BeEmpty())
+ Expect(err.Error()).To(ContainSubstring("NOPERM"))
+
+ // perm for reserve
+ Expect(c.BFReserve(ctx, "bloom", 0.01, 100).Err()).NotTo(HaveOccurred())
+
+ // no perm for info
+ err = c.BFInfo(ctx, "bloom").Err()
+ Expect(err).ToNot(BeEmpty())
+ Expect(err.Error()).To(ContainSubstring("NOPERM"))
+
+ // perm for cf.reserve
+ Expect(c.CFReserve(ctx, "cfres", 100).Err()).NotTo(HaveOccurred())
+ // perm for cms.initbydim
+ Expect(c.CMSInitByDim(ctx, "cmsdim", 100, 5).Err()).NotTo(HaveOccurred())
+ // perm for topk.reserve
+ Expect(c.TopKReserve(ctx, "topk", 10).Err()).NotTo(HaveOccurred())
+ // perm for tdigest.create
+ Expect(c.TDigestCreate(ctx, "tdc").Err()).NotTo(HaveOccurred())
+ // perm for ts.create
+ Expect(c.TSCreate(ctx, "tsts").Err()).NotTo(HaveOccurred())
+ // noperm for ts.info
+ err = c.TSInfo(ctx, "tsts").Err()
+ Expect(err).ToNot(BeEmpty())
+ Expect(err.Error()).To(ContainSubstring("NOPERM"))
+
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("set permissions for module categories", func() {
+ SkipBeforeRedisVersion(8, "permissions for modules are supported for Redis Version >=8")
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(BeEquivalentTo("OK"))
+ WaitForIndexing(client, "txt")
+ client.HSet(ctx, "doc1", "txt", "foo baz")
+ client.HSet(ctx, "doc2", "txt", "foo bar")
+ // Grant whole module categories (+@...) instead of single commands;
+ // every module command used below should therefore succeed.
+ add, err := client.ACLSetUser(ctx,
+ TestUserName,
+ "reset",
+ "nopass",
+ "on",
+ "~*",
+ "+@search",
+ "+@json",
+ "+@bloom",
+ "+@cuckoo",
+ "+@topk",
+ "+@cms",
+ "+@timeseries",
+ "+@tdigest",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(Equal("OK"))
+
+ c := client.Conn()
+ authed, err := c.AuthACL(ctx, TestUserName, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(authed).To(Equal("OK"))
+
+ // has perm for search
+ Expect(c.FTSearch(ctx, "txt", "foo ~bar").Err()).NotTo(HaveOccurred())
+ // perm for dropindex
+ Expect(c.FTDropIndex(ctx, "txt").Err()).NotTo(HaveOccurred())
+ // json set and get have perm
+ Expect(c.JSONSet(ctx, "foo", "$", "\"bar\"").Err()).NotTo(HaveOccurred())
+ Expect(c.JSONGet(ctx, "foo", "$").Val()).To(BeEquivalentTo("[\"bar\"]"))
+ // perm for json clear
+ Expect(c.JSONClear(ctx, "foo", "$").Err()).NotTo(HaveOccurred())
+ // perm for reserve
+ Expect(c.BFReserve(ctx, "bloom", 0.01, 100).Err()).NotTo(HaveOccurred())
+ // perm for info
+ Expect(c.BFInfo(ctx, "bloom").Err()).NotTo(HaveOccurred())
+ // perm for cf.reserve
+ Expect(c.CFReserve(ctx, "cfres", 100).Err()).NotTo(HaveOccurred())
+ // perm for cms.initbydim
+ Expect(c.CMSInitByDim(ctx, "cmsdim", 100, 5).Err()).NotTo(HaveOccurred())
+ // perm for topk.reserve
+ Expect(c.TopKReserve(ctx, "topk", 10).Err()).NotTo(HaveOccurred())
+ // perm for tdigest.create
+ Expect(c.TDigestCreate(ctx, "tdc").Err()).NotTo(HaveOccurred())
+ // perm for ts.create
+ Expect(c.TSCreate(ctx, "tsts").Err()).NotTo(HaveOccurred())
+ // perm for ts.info
+ Expect(c.TSInfo(ctx, "tsts").Err()).NotTo(HaveOccurred())
+
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+})
+
+// Integration tests for ACL CAT, both unfiltered and filtered by category,
+// including module-provided categories on Redis >= 8.
+var _ = Describe("ACL Categories", func() {
+ var client *redis.Client
+ var ctx context.Context
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ opt := redisOptions()
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("lists acl categories and subcategories", func() {
+ res, err := client.ACLCat(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(res)).To(BeNumerically(">", 20))
+ Expect(res).To(ContainElements(
+ "read",
+ "write",
+ "keyspace",
+ "dangerous",
+ "slow",
+ "set",
+ "sortedset",
+ "list",
+ "hash",
+ ))
+
+ // Filtering by a category returns the commands inside it.
+ res, err = client.ACLCatArgs(ctx, &redis.ACLCatArgs{Category: "read"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainElement("get"))
+ })
+
+ It("lists acl categories and subcategories with Modules", func() {
+ SkipBeforeRedisVersion(8, "modules are included in acl for redis version >= 8")
+ // Map of module category -> one command expected inside that category.
+ aclTestCase := map[string]string{
+ "search": "FT.CREATE",
+ "bloom": "bf.add",
+ "json": "json.get",
+ "cuckoo": "cf.insert",
+ "cms": "cms.query",
+ "topk": "topk.list",
+ "tdigest": "tdigest.rank",
+ "timeseries": "ts.range",
+ }
+ var cats []interface{}
+
+ for cat, subitem := range aclTestCase {
+ cats = append(cats, cat)
+
+ res, err := client.ACLCatArgs(ctx, &redis.ACLCatArgs{
+ Category: cat,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainElement(subitem))
+ }
+
+ // The unfiltered listing must include every module category checked above.
+ res, err := client.ACLCat(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainElements(cats...))
+ })
+})
diff --git a/adapters.go b/adapters.go
new file mode 100644
index 0000000000..4146153bf3
--- /dev/null
+++ b/adapters.go
@@ -0,0 +1,111 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "net"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/interfaces"
+ "github.com/redis/go-redis/v9/push"
+)
+
+// ErrInvalidCommand is returned when an invalid command is passed to ExecuteCommand.
+var ErrInvalidCommand = errors.New("invalid command type")
+
+// ErrInvalidPool is returned when the pool type is not supported.
+var ErrInvalidPool = errors.New("invalid pool type")
+
+// newClientAdapter creates a new client adapter for regular Redis clients.
+func newClientAdapter(client *baseClient) interfaces.ClientInterface {
+ return &clientAdapter{client: client}
+}
+
+// clientAdapter adapts a Redis client to implement interfaces.ClientInterface.
+type clientAdapter struct {
+ client *baseClient
+}
+
+// GetOptions returns the client options.
+func (ca *clientAdapter) GetOptions() interfaces.OptionsInterface {
+ return &optionsAdapter{options: ca.client.opt}
+}
+
+// GetPushProcessor returns the client's push notification processor.
+func (ca *clientAdapter) GetPushProcessor() interfaces.NotificationProcessor {
+ return &pushProcessorAdapter{processor: ca.client.pushProcessor}
+}
+
+// optionsAdapter adapts Redis options to implement interfaces.OptionsInterface.
+type optionsAdapter struct {
+ options *Options
+}
+
+// GetReadTimeout returns the read timeout.
+func (oa *optionsAdapter) GetReadTimeout() time.Duration {
+ return oa.options.ReadTimeout
+}
+
+// GetWriteTimeout returns the write timeout.
+func (oa *optionsAdapter) GetWriteTimeout() time.Duration {
+ return oa.options.WriteTimeout
+}
+
+// GetNetwork returns the network type.
+func (oa *optionsAdapter) GetNetwork() string {
+ return oa.options.Network
+}
+
+// GetAddr returns the connection address.
+func (oa *optionsAdapter) GetAddr() string {
+ return oa.options.Addr
+}
+
+// IsTLSEnabled returns true if TLS is enabled.
+func (oa *optionsAdapter) IsTLSEnabled() bool {
+ return oa.options.TLSConfig != nil
+}
+
+// GetProtocol returns the protocol version.
+func (oa *optionsAdapter) GetProtocol() int {
+ return oa.options.Protocol
+}
+
+// GetPoolSize returns the connection pool size.
+func (oa *optionsAdapter) GetPoolSize() int {
+ return oa.options.PoolSize
+}
+
+// NewDialer returns a new dialer function for the connection.
+func (oa *optionsAdapter) NewDialer() func(context.Context) (net.Conn, error) {
+	baseDialer := oa.options.NewDialer()
+	return func(ctx context.Context) (net.Conn, error) {
+		// Bind the configured network and address here so the returned dialer takes only a context.
+		network := oa.options.Network
+		addr := oa.options.Addr
+		return baseDialer(ctx, network, addr)
+	}
+}
+
+// pushProcessorAdapter adapts a push.NotificationProcessor to implement interfaces.NotificationProcessor.
+type pushProcessorAdapter struct {
+ processor push.NotificationProcessor
+}
+
+// RegisterHandler registers a handler for a specific push notification name; the handler must implement push.NotificationHandler, otherwise an error is returned.
+func (ppa *pushProcessorAdapter) RegisterHandler(pushNotificationName string, handler interface{}, protected bool) error {
+	if pushHandler, ok := handler.(push.NotificationHandler); ok {
+		return ppa.processor.RegisterHandler(pushNotificationName, pushHandler, protected)
+	}
+	return errors.New("handler must implement push.NotificationHandler")
+}
+
+// UnregisterHandler removes a handler for a specific push notification name.
+func (ppa *pushProcessorAdapter) UnregisterHandler(pushNotificationName string) error {
+ return ppa.processor.UnregisterHandler(pushNotificationName)
+}
+
+// GetHandler returns the handler for a specific push notification name.
+func (ppa *pushProcessorAdapter) GetHandler(pushNotificationName string) interface{} {
+ return ppa.processor.GetHandler(pushNotificationName)
+}
diff --git a/async_handoff_integration_test.go b/async_handoff_integration_test.go
new file mode 100644
index 0000000000..29960df5e9
--- /dev/null
+++ b/async_handoff_integration_test.go
@@ -0,0 +1,353 @@
+package redis
+
+import (
+ "context"
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+	"github.com/redis/go-redis/v9/internal/pool"
+	"github.com/redis/go-redis/v9/logging"
+	"github.com/redis/go-redis/v9/maintnotifications"
+)
+
+// mockNetConn implements net.Conn for testing
+type mockNetConn struct {
+ addr string
+}
+
+func (m *mockNetConn) Read(b []byte) (n int, err error) { return 0, nil }
+func (m *mockNetConn) Write(b []byte) (n int, err error) { return len(b), nil }
+func (m *mockNetConn) Close() error { return nil }
+func (m *mockNetConn) LocalAddr() net.Addr { return &mockAddr{m.addr} }
+func (m *mockNetConn) RemoteAddr() net.Addr { return &mockAddr{m.addr} }
+func (m *mockNetConn) SetDeadline(t time.Time) error { return nil }
+func (m *mockNetConn) SetReadDeadline(t time.Time) error { return nil }
+func (m *mockNetConn) SetWriteDeadline(t time.Time) error { return nil }
+
+type mockAddr struct {
+ addr string
+}
+
+func (m *mockAddr) Network() string { return "tcp" }
+func (m *mockAddr) String() string { return m.addr }
+
+// TestEventDrivenHandoffIntegration tests the complete event-driven handoff flow
+func TestEventDrivenHandoffIntegration(t *testing.T) {
+ t.Run("EventDrivenHandoffWithPoolSkipping", func(t *testing.T) {
+ // Create a base dialer for testing
+ baseDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return &mockNetConn{addr: addr}, nil
+ }
+
+ // Create processor with event-driven handoff support
+ processor := maintnotifications.NewPoolHook(baseDialer, "tcp", nil, nil)
+ defer processor.Shutdown(context.Background())
+
+ // Create a test pool with hooks
+ hookManager := pool.NewPoolHookManager()
+ hookManager.AddHook(processor)
+
+ testPool := pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return &mockNetConn{addr: "original:6379"}, nil
+ },
+ PoolSize: int32(5),
+ PoolTimeout: time.Second,
+ })
+
+ // Add the hook to the pool after creation
+ testPool.AddPoolHook(processor)
+ defer testPool.Close()
+
+ // Set the pool reference in the processor for connection removal on handoff failure
+ processor.SetPool(testPool)
+
+ ctx := context.Background()
+
+ // Get a connection and mark it for handoff
+ conn, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Failed to get connection: %v", err)
+ }
+
+	// Set initialization function with a small delay to keep the handoff pending. NOTE(review): initConnCalled is written by the handoff goroutine and read below without synchronization — verify under -race (use atomic.Bool or a channel if it flags).
+ initConnCalled := false
+ initConnFunc := func(ctx context.Context, cn *pool.Conn) error {
+ time.Sleep(50 * time.Millisecond) // Add delay to keep handoff pending
+ initConnCalled = true
+ return nil
+ }
+ conn.SetInitConnFunc(initConnFunc)
+
+ // Mark connection for handoff
+ err = conn.MarkForHandoff("new-endpoint:6379", 12345)
+ if err != nil {
+ t.Fatalf("Failed to mark connection for handoff: %v", err)
+ }
+
+ // Return connection to pool - this should queue handoff
+ testPool.Put(ctx, conn)
+
+ // Give the on-demand worker a moment to start processing
+ time.Sleep(10 * time.Millisecond)
+
+ // Verify handoff was queued
+ if !processor.IsHandoffPending(conn) {
+ t.Error("Handoff should be queued in pending map")
+ }
+
+ // Try to get the same connection - should be skipped due to pending handoff
+ conn2, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Failed to get second connection: %v", err)
+ }
+
+ // Should get a different connection (the pending one should be skipped)
+ if conn == conn2 {
+ t.Error("Should have gotten a different connection while handoff is pending")
+ }
+
+ // Return the second connection
+ testPool.Put(ctx, conn2)
+
+ // Wait for handoff to complete
+ time.Sleep(200 * time.Millisecond)
+
+ // Verify handoff completed (removed from pending map)
+ if processor.IsHandoffPending(conn) {
+ t.Error("Handoff should have completed and been removed from pending map")
+ }
+
+ if !initConnCalled {
+ t.Error("InitConn should have been called during handoff")
+ }
+
+ // Now the original connection should be available again
+ conn3, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Failed to get third connection: %v", err)
+ }
+
+ // Could be the original connection (now handed off) or a new one
+ testPool.Put(ctx, conn3)
+ })
+
+ t.Run("ConcurrentHandoffs", func(t *testing.T) {
+ // Create a base dialer that simulates slow handoffs
+ baseDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
+ time.Sleep(50 * time.Millisecond) // Simulate network delay
+ return &mockNetConn{addr: addr}, nil
+ }
+
+ processor := maintnotifications.NewPoolHook(baseDialer, "tcp", nil, nil)
+ defer processor.Shutdown(context.Background())
+
+ // Create hooks manager and add processor as hook
+ hookManager := pool.NewPoolHookManager()
+ hookManager.AddHook(processor)
+
+ testPool := pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return &mockNetConn{addr: "original:6379"}, nil
+ },
+
+ PoolSize: int32(10),
+ PoolTimeout: time.Second,
+ })
+ defer testPool.Close()
+
+ // Add the hook to the pool after creation
+ testPool.AddPoolHook(processor)
+
+ // Set the pool reference in the processor
+ processor.SetPool(testPool)
+
+ ctx := context.Background()
+ var wg sync.WaitGroup
+
+ // Start multiple concurrent handoffs
+ for i := 0; i < 5; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+
+ // Get connection
+ conn, err := testPool.Get(ctx)
+ if err != nil {
+ t.Errorf("Failed to get conn[%d]: %v", id, err)
+ return
+ }
+
+ // Set initialization function
+ initConnFunc := func(ctx context.Context, cn *pool.Conn) error {
+ return nil
+ }
+ conn.SetInitConnFunc(initConnFunc)
+
+ // Mark for handoff
+ conn.MarkForHandoff("new-endpoint:6379", int64(id))
+
+ // Return to pool (starts async handoff)
+ testPool.Put(ctx, conn)
+ }(i)
+ }
+
+ wg.Wait()
+
+ // Wait for all handoffs to complete
+ time.Sleep(300 * time.Millisecond)
+
+ // Verify pool is still functional
+ conn, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Pool should still be functional after concurrent handoffs: %v", err)
+ }
+ testPool.Put(ctx, conn)
+ })
+
+ t.Run("HandoffFailureRecovery", func(t *testing.T) {
+ // Create a failing base dialer
+ failingDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return nil, &net.OpError{Op: "dial", Err: &net.DNSError{Name: addr}}
+ }
+
+ processor := maintnotifications.NewPoolHook(failingDialer, "tcp", nil, nil)
+ defer processor.Shutdown(context.Background())
+
+ // Create hooks manager and add processor as hook
+ hookManager := pool.NewPoolHookManager()
+ hookManager.AddHook(processor)
+
+ testPool := pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return &mockNetConn{addr: "original:6379"}, nil
+ },
+
+ PoolSize: int32(3),
+ PoolTimeout: time.Second,
+ })
+ defer testPool.Close()
+
+ // Add the hook to the pool after creation
+ testPool.AddPoolHook(processor)
+
+ // Set the pool reference in the processor
+ processor.SetPool(testPool)
+
+ ctx := context.Background()
+
+ // Get connection and mark for handoff
+ conn, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Failed to get connection: %v", err)
+ }
+
+ conn.MarkForHandoff("unreachable-endpoint:6379", 12345)
+
+ // Return to pool (starts async handoff that will fail)
+ testPool.Put(ctx, conn)
+
+ // Wait for handoff to fail
+ time.Sleep(200 * time.Millisecond)
+
+ // Connection should be removed from pending map after failed handoff
+ if processor.IsHandoffPending(conn) {
+ t.Error("Connection should be removed from pending map after failed handoff")
+ }
+
+ // Pool should still be functional
+ conn2, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Pool should still be functional: %v", err)
+ }
+
+ // In event-driven approach, the original connection remains in pool
+ // even after failed handoff (it's still a valid connection)
+ // We might get the same connection or a different one
+ testPool.Put(ctx, conn2)
+ })
+
+ t.Run("GracefulShutdown", func(t *testing.T) {
+ // Create a slow base dialer
+ slowDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
+ time.Sleep(100 * time.Millisecond)
+ return &mockNetConn{addr: addr}, nil
+ }
+
+ processor := maintnotifications.NewPoolHook(slowDialer, "tcp", nil, nil)
+ defer processor.Shutdown(context.Background())
+
+ // Create hooks manager and add processor as hook
+ hookManager := pool.NewPoolHookManager()
+ hookManager.AddHook(processor)
+
+ testPool := pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return &mockNetConn{addr: "original:6379"}, nil
+ },
+
+ PoolSize: int32(2),
+ PoolTimeout: time.Second,
+ })
+ defer testPool.Close()
+
+ // Add the hook to the pool after creation
+ testPool.AddPoolHook(processor)
+
+ // Set the pool reference in the processor
+ processor.SetPool(testPool)
+
+ ctx := context.Background()
+
+ // Start a handoff
+ conn, err := testPool.Get(ctx)
+ if err != nil {
+ t.Fatalf("Failed to get connection: %v", err)
+ }
+
+ if err := conn.MarkForHandoff("new-endpoint:6379", 12345); err != nil {
+ t.Fatalf("Failed to mark connection for handoff: %v", err)
+ }
+
+ // Set a mock initialization function with delay to ensure handoff is pending
+ conn.SetInitConnFunc(func(ctx context.Context, cn *pool.Conn) error {
+ time.Sleep(50 * time.Millisecond) // Add delay to keep handoff pending
+ return nil
+ })
+
+ testPool.Put(ctx, conn)
+
+ // Give the on-demand worker a moment to start and begin processing
+ // The handoff should be pending because the slowDialer takes 100ms
+ time.Sleep(10 * time.Millisecond)
+
+ // Verify handoff was queued and is being processed
+ if !processor.IsHandoffPending(conn) {
+ t.Error("Handoff should be queued in pending map")
+ }
+
+ // Give the handoff a moment to start processing
+ time.Sleep(50 * time.Millisecond)
+
+ // Shutdown processor gracefully
+ // Use a longer timeout to account for slow dialer (100ms) plus processing overhead
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ err = processor.Shutdown(shutdownCtx)
+ if err != nil {
+ t.Errorf("Graceful shutdown should succeed: %v", err)
+ }
+
+ // Handoff should have completed (removed from pending map)
+ if processor.IsHandoffPending(conn) {
+ t.Error("Handoff should have completed and been removed from pending map after shutdown")
+ }
+ })
+}
+
+func init() {
+ logging.Disable()
+}
diff --git a/auth/auth.go b/auth/auth.go
new file mode 100644
index 0000000000..1f5c802248
--- /dev/null
+++ b/auth/auth.go
@@ -0,0 +1,61 @@
+// Package auth provides authentication-related interfaces and types.
+// It also includes a basic implementation of credentials using username and password.
+package auth
+
+// StreamingCredentialsProvider is an interface that defines the methods for a streaming credentials provider.
+// It is used to provide credentials for authentication.
+// The CredentialsListener is used to receive updates when the credentials change.
+type StreamingCredentialsProvider interface {
+ // Subscribe subscribes to the credentials provider for updates.
+ // It returns the current credentials, a cancel function to unsubscribe from the provider,
+ // and an error if any.
+ // TODO(ndyakov): Should we add context to the Subscribe method?
+ Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error)
+}
+
+// UnsubscribeFunc is a function that is used to cancel the subscription to the credentials provider.
+// It is used to unsubscribe from the provider when the credentials are no longer needed.
+type UnsubscribeFunc func() error
+
+// CredentialsListener is an interface that defines the methods for a credentials listener.
+// It is used to receive updates when the credentials change.
+// The OnNext method is called when the credentials change.
+// The OnError method is called when an error occurs while requesting the credentials.
+type CredentialsListener interface {
+ OnNext(credentials Credentials)
+ OnError(err error)
+}
+
+// Credentials is an interface that defines the methods for credentials.
+// It is used to provide the credentials for authentication.
+type Credentials interface {
+ // BasicAuth returns the username and password for basic authentication.
+ BasicAuth() (username string, password string)
+ // RawCredentials returns the raw credentials as a string.
+ // This can be used to extract the username and password from the raw credentials or
+ // additional information if present in the token.
+ RawCredentials() string
+}
+
+type basicAuth struct {
+ username string
+ password string
+}
+
+// RawCredentials returns the raw credentials as a string.
+func (b *basicAuth) RawCredentials() string {
+ return b.username + ":" + b.password
+}
+
+// BasicAuth returns the username and password for basic authentication.
+func (b *basicAuth) BasicAuth() (username string, password string) {
+ return b.username, b.password
+}
+
+// NewBasicCredentials creates a new Credentials object from the given username and password.
+func NewBasicCredentials(username, password string) Credentials {
+ return &basicAuth{
+ username: username,
+ password: password,
+ }
+}
diff --git a/auth/auth_test.go b/auth/auth_test.go
new file mode 100644
index 0000000000..73984e331b
--- /dev/null
+++ b/auth/auth_test.go
@@ -0,0 +1,363 @@
+package auth
+
+import (
+ "errors"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+type mockStreamingProvider struct {
+ credentials Credentials
+ err error
+ updates chan Credentials
+}
+
+func newMockStreamingProvider(initialCreds Credentials) *mockStreamingProvider {
+ return &mockStreamingProvider{
+ credentials: initialCreds,
+ updates: make(chan Credentials, 10),
+ }
+}
+
+func (m *mockStreamingProvider) Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) {
+ if m.err != nil {
+ return nil, nil, m.err
+ }
+
+ // Send initial credentials
+ listener.OnNext(m.credentials)
+
+ // Start goroutine to handle updates
+ go func() {
+ for creds := range m.updates {
+ listener.OnNext(creds)
+ }
+ }()
+
+ return m.credentials, func() error {
+ close(m.updates)
+ return nil
+ }, nil
+}
+
+func TestStreamingCredentialsProvider(t *testing.T) {
+ t.Run("successful subscription", func(t *testing.T) {
+ initialCreds := NewBasicCredentials("user1", "pass1")
+ provider := newMockStreamingProvider(initialCreds)
+
+ var receivedCreds []Credentials
+ var receivedErrors []error
+ var mu sync.Mutex
+
+ listener := NewReAuthCredentialsListener(
+ func(creds Credentials) error {
+ mu.Lock()
+ receivedCreds = append(receivedCreds, creds)
+ mu.Unlock()
+ return nil
+ },
+ func(err error) {
+ receivedErrors = append(receivedErrors, err)
+ },
+ )
+
+ creds, cancel, err := provider.Subscribe(listener)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if cancel == nil {
+ t.Fatal("expected cancel function to be non-nil")
+ }
+ if creds != initialCreds {
+ t.Fatalf("expected credentials %v, got %v", initialCreds, creds)
+ }
+ if len(receivedCreds) != 1 {
+ t.Fatalf("expected 1 received credential, got %d", len(receivedCreds))
+ }
+ if receivedCreds[0] != initialCreds {
+ t.Fatalf("expected received credential %v, got %v", initialCreds, receivedCreds[0])
+ }
+ if len(receivedErrors) != 0 {
+ t.Fatalf("expected no errors, got %d", len(receivedErrors))
+ }
+
+ // Send an update
+ newCreds := NewBasicCredentials("user2", "pass2")
+ provider.updates <- newCreds
+
+ // Wait for update to be processed
+ time.Sleep(100 * time.Millisecond)
+ mu.Lock()
+ if len(receivedCreds) != 2 {
+ t.Fatalf("expected 2 received credentials, got %d", len(receivedCreds))
+ }
+ if receivedCreds[1] != newCreds {
+ t.Fatalf("expected received credential %v, got %v", newCreds, receivedCreds[1])
+ }
+ mu.Unlock()
+
+ // Cancel subscription
+ if err := cancel(); err != nil {
+ t.Fatalf("unexpected error cancelling subscription: %v", err)
+ }
+ })
+
+ t.Run("subscription error", func(t *testing.T) {
+ provider := &mockStreamingProvider{
+ err: errors.New("subscription failed"),
+ }
+
+ var receivedCreds []Credentials
+ var receivedErrors []error
+
+ listener := NewReAuthCredentialsListener(
+ func(creds Credentials) error {
+ receivedCreds = append(receivedCreds, creds)
+ return nil
+ },
+ func(err error) {
+ receivedErrors = append(receivedErrors, err)
+ },
+ )
+
+ creds, cancel, err := provider.Subscribe(listener)
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
+ if cancel != nil {
+ t.Fatal("expected cancel function to be nil")
+ }
+ if creds != nil {
+ t.Fatalf("expected nil credentials, got %v", creds)
+ }
+ if len(receivedCreds) != 0 {
+ t.Fatalf("expected no received credentials, got %d", len(receivedCreds))
+ }
+ if len(receivedErrors) != 0 {
+ t.Fatalf("expected no errors, got %d", len(receivedErrors))
+ }
+ })
+
+ t.Run("re-auth error", func(t *testing.T) {
+ initialCreds := NewBasicCredentials("user1", "pass1")
+ provider := newMockStreamingProvider(initialCreds)
+
+ reauthErr := errors.New("re-auth failed")
+ var receivedErrors []error
+
+ listener := NewReAuthCredentialsListener(
+ func(creds Credentials) error {
+ return reauthErr
+ },
+ func(err error) {
+ receivedErrors = append(receivedErrors, err)
+ },
+ )
+
+ creds, cancel, err := provider.Subscribe(listener)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if cancel == nil {
+ t.Fatal("expected cancel function to be non-nil")
+ }
+ if creds != initialCreds {
+ t.Fatalf("expected credentials %v, got %v", initialCreds, creds)
+ }
+ if len(receivedErrors) != 1 {
+ t.Fatalf("expected 1 error, got %d", len(receivedErrors))
+ }
+ if receivedErrors[0] != reauthErr {
+ t.Fatalf("expected error %v, got %v", reauthErr, receivedErrors[0])
+ }
+
+ if err := cancel(); err != nil {
+ t.Fatalf("unexpected error cancelling subscription: %v", err)
+ }
+ })
+}
+
+func TestBasicCredentials(t *testing.T) {
+ tests := []struct {
+ name string
+ username string
+ password string
+ expectedUser string
+ expectedPass string
+ expectedRaw string
+ }{
+ {
+ name: "basic auth",
+ username: "user1",
+ password: "pass1",
+ expectedUser: "user1",
+ expectedPass: "pass1",
+ expectedRaw: "user1:pass1",
+ },
+ {
+ name: "empty username",
+ username: "",
+ password: "pass1",
+ expectedUser: "",
+ expectedPass: "pass1",
+ expectedRaw: ":pass1",
+ },
+ {
+ name: "empty password",
+ username: "user1",
+ password: "",
+ expectedUser: "user1",
+ expectedPass: "",
+ expectedRaw: "user1:",
+ },
+ {
+ name: "both username and password empty",
+ username: "",
+ password: "",
+ expectedUser: "",
+ expectedPass: "",
+ expectedRaw: ":",
+ },
+ {
+ name: "special characters",
+ username: "user:1",
+ password: "pa:ss@!#",
+ expectedUser: "user:1",
+ expectedPass: "pa:ss@!#",
+ expectedRaw: "user:1:pa:ss@!#",
+ },
+ {
+ name: "unicode characters",
+ username: "ใฆใผใถใผ",
+ password: "ๅฏ็ขผ123",
+ expectedUser: "ใฆใผใถใผ",
+ expectedPass: "ๅฏ็ขผ123",
+ expectedRaw: "ใฆใผใถใผ:ๅฏ็ขผ123",
+ },
+ {
+ name: "long credentials",
+ username: strings.Repeat("u", 1000),
+ password: strings.Repeat("p", 1000),
+ expectedUser: strings.Repeat("u", 1000),
+ expectedPass: strings.Repeat("p", 1000),
+ expectedRaw: strings.Repeat("u", 1000) + ":" + strings.Repeat("p", 1000),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ creds := NewBasicCredentials(tt.username, tt.password)
+
+ user, pass := creds.BasicAuth()
+ if user != tt.expectedUser {
+ t.Errorf("BasicAuth() username = %q; want %q", user, tt.expectedUser)
+ }
+ if pass != tt.expectedPass {
+ t.Errorf("BasicAuth() password = %q; want %q", pass, tt.expectedPass)
+ }
+
+ raw := creds.RawCredentials()
+ if raw != tt.expectedRaw {
+ t.Errorf("RawCredentials() = %q; want %q", raw, tt.expectedRaw)
+ }
+ })
+ }
+}
+
+func TestReAuthCredentialsListener(t *testing.T) {
+ t.Run("successful re-auth", func(t *testing.T) {
+ var reAuthCalled bool
+ var onErrCalled bool
+ var receivedCreds Credentials
+
+ listener := NewReAuthCredentialsListener(
+ func(creds Credentials) error {
+ reAuthCalled = true
+ receivedCreds = creds
+ return nil
+ },
+ func(err error) {
+ onErrCalled = true
+ },
+ )
+
+ creds := NewBasicCredentials("user1", "pass1")
+ listener.OnNext(creds)
+
+ if !reAuthCalled {
+ t.Fatal("expected reAuth to be called")
+ }
+ if onErrCalled {
+ t.Fatal("expected onErr not to be called")
+ }
+ if receivedCreds != creds {
+ t.Fatalf("expected credentials %v, got %v", creds, receivedCreds)
+ }
+ })
+
+ t.Run("re-auth error", func(t *testing.T) {
+ var reAuthCalled bool
+ var onErrCalled bool
+ var receivedErr error
+ expectedErr := errors.New("re-auth failed")
+
+ listener := NewReAuthCredentialsListener(
+ func(creds Credentials) error {
+ reAuthCalled = true
+ return expectedErr
+ },
+ func(err error) {
+ onErrCalled = true
+ receivedErr = err
+ },
+ )
+
+ creds := NewBasicCredentials("user1", "pass1")
+ listener.OnNext(creds)
+
+ if !reAuthCalled {
+ t.Fatal("expected reAuth to be called")
+ }
+ if !onErrCalled {
+ t.Fatal("expected onErr to be called")
+ }
+ if receivedErr != expectedErr {
+ t.Fatalf("expected error %v, got %v", expectedErr, receivedErr)
+ }
+ })
+
+ t.Run("on error", func(t *testing.T) {
+ var onErrCalled bool
+ var receivedErr error
+ expectedErr := errors.New("provider error")
+
+ listener := NewReAuthCredentialsListener(
+ func(creds Credentials) error {
+ return nil
+ },
+ func(err error) {
+ onErrCalled = true
+ receivedErr = err
+ },
+ )
+
+ listener.OnError(expectedErr)
+
+ if !onErrCalled {
+ t.Fatal("expected onErr to be called")
+ }
+ if receivedErr != expectedErr {
+ t.Fatalf("expected error %v, got %v", expectedErr, receivedErr)
+ }
+ })
+
+ t.Run("nil callbacks", func(t *testing.T) {
+ listener := NewReAuthCredentialsListener(nil, nil)
+
+ // Should not panic
+ listener.OnNext(NewBasicCredentials("user1", "pass1"))
+ listener.OnError(errors.New("test error"))
+ })
+}
diff --git a/auth/reauth_credentials_listener.go b/auth/reauth_credentials_listener.go
new file mode 100644
index 0000000000..40076a0b13
--- /dev/null
+++ b/auth/reauth_credentials_listener.go
@@ -0,0 +1,47 @@
+package auth
+
+// ReAuthCredentialsListener is a struct that implements the CredentialsListener interface.
+// It is used to re-authenticate the credentials when they are updated.
+// It contains:
+// - reAuth: a function that takes the new credentials and returns an error if any.
+// - onErr: a function that takes an error and handles it.
+type ReAuthCredentialsListener struct {
+ reAuth func(credentials Credentials) error
+ onErr func(err error)
+}
+
+// OnNext is called when the credentials are updated.
+// It calls the reAuth function with the new credentials.
+// If the reAuth function returns an error, it calls the onErr function with the error.
+func (c *ReAuthCredentialsListener) OnNext(credentials Credentials) {
+ if c.reAuth == nil {
+ return
+ }
+
+ err := c.reAuth(credentials)
+ if err != nil {
+ c.OnError(err)
+ }
+}
+
+// OnError is called when an error occurs.
+// It can be called from both the credentials provider and the reAuth function.
+func (c *ReAuthCredentialsListener) OnError(err error) {
+ if c.onErr == nil {
+ return
+ }
+
+ c.onErr(err)
+}
+
+// NewReAuthCredentialsListener creates a new ReAuthCredentialsListener.
+// Implements the auth.CredentialsListener interface.
+func NewReAuthCredentialsListener(reAuth func(credentials Credentials) error, onErr func(err error)) *ReAuthCredentialsListener {
+ return &ReAuthCredentialsListener{
+ reAuth: reAuth,
+ onErr: onErr,
+ }
+}
+
+// Ensure ReAuthCredentialsListener implements the CredentialsListener interface.
+var _ CredentialsListener = (*ReAuthCredentialsListener)(nil)
diff --git a/bench_test.go b/bench_test.go
index f6b75c72a2..263216c17b 100644
--- a/bench_test.go
+++ b/bench_test.go
@@ -2,13 +2,18 @@ package redis_test
import (
"bytes"
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
"testing"
"time"
- "github.com/go-redis/redis"
+ "github.com/redis/go-redis/v9"
)
-func benchmarkRedisClient(poolSize int) *redis.Client {
+func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
client := redis.NewClient(&redis.Options{
Addr: ":6379",
DialTimeout: time.Second,
@@ -16,110 +21,116 @@ func benchmarkRedisClient(poolSize int) *redis.Client {
WriteTimeout: time.Second,
PoolSize: poolSize,
})
- if err := client.FlushDB().Err(); err != nil {
+ if err := client.FlushDB(ctx).Err(); err != nil {
panic(err)
}
return client
}
func BenchmarkRedisPing(b *testing.B) {
- client := benchmarkRedisClient(10)
- defer client.Close()
+ ctx := context.Background()
+ rdb := benchmarkRedisClient(ctx, 10)
+ defer rdb.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := client.Ping().Err(); err != nil {
+ if err := rdb.Ping(ctx).Err(); err != nil {
b.Fatal(err)
}
}
})
}
-func BenchmarkRedisSetString(b *testing.B) {
- client := benchmarkRedisClient(10)
- defer client.Close()
+func BenchmarkSetGoroutines(b *testing.B) {
+ ctx := context.Background()
+ rdb := benchmarkRedisClient(ctx, 10)
+ defer rdb.Close()
- value := string(bytes.Repeat([]byte{'1'}, 10000))
+ for i := 0; i < b.N; i++ {
+ var wg sync.WaitGroup
- b.ResetTimer()
+ for i := 0; i < 1000; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if err := client.Set("key", value, 0).Err(); err != nil {
- b.Fatal(err)
- }
+ err := rdb.Set(ctx, "hello", "world", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+ }()
}
- })
-}
-func BenchmarkRedisGetNil(b *testing.B) {
- client := benchmarkRedisClient(10)
- defer client.Close()
-
- b.ResetTimer()
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if err := client.Get("key").Err(); err != redis.Nil {
- b.Fatal(err)
- }
- }
- })
+ wg.Wait()
+ }
}
-func benchmarkSetRedis(b *testing.B, poolSize, payloadSize int) {
- client := benchmarkRedisClient(poolSize)
+func BenchmarkRedisGetNil(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
defer client.Close()
- value := string(bytes.Repeat([]byte{'1'}, payloadSize))
-
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := client.Set("key", value, 0).Err(); err != nil {
+ if err := client.Get(ctx, "key").Err(); err != redis.Nil {
b.Fatal(err)
}
}
})
}
-func BenchmarkSetRedis10Conns64Bytes(b *testing.B) {
- benchmarkSetRedis(b, 10, 64)
-}
-
-func BenchmarkSetRedis100Conns64Bytes(b *testing.B) {
- benchmarkSetRedis(b, 100, 64)
-}
-
-func BenchmarkSetRedis10Conns1KB(b *testing.B) {
- benchmarkSetRedis(b, 10, 1024)
+type setStringBenchmark struct {
+ poolSize int
+ valueSize int
}
-func BenchmarkSetRedis100Conns1KB(b *testing.B) {
- benchmarkSetRedis(b, 100, 1024)
+func (bm setStringBenchmark) String() string {
+ return fmt.Sprintf("pool=%d value=%d", bm.poolSize, bm.valueSize)
}
-func BenchmarkSetRedis10Conns10KB(b *testing.B) {
- benchmarkSetRedis(b, 10, 10*1024)
-}
-
-func BenchmarkSetRedis100Conns10KB(b *testing.B) {
- benchmarkSetRedis(b, 100, 10*1024)
-}
-
-func BenchmarkSetRedis10Conns1MB(b *testing.B) {
- benchmarkSetRedis(b, 10, 1024*1024)
-}
-
-func BenchmarkSetRedis100Conns1MB(b *testing.B) {
- benchmarkSetRedis(b, 100, 1024*1024)
+func BenchmarkRedisSetString(b *testing.B) {
+ benchmarks := []setStringBenchmark{
+ {10, 64},
+ {10, 1024},
+ {10, 64 * 1024},
+ {10, 1024 * 1024},
+ {10, 10 * 1024 * 1024},
+
+ {100, 64},
+ {100, 1024},
+ {100, 64 * 1024},
+ {100, 1024 * 1024},
+ {100, 10 * 1024 * 1024},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.String(), func(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, bm.poolSize)
+ defer client.Close()
+
+ value := strings.Repeat("1", bm.valueSize)
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Set(ctx, "key", value, 0).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+ }
}
func BenchmarkRedisSetGetBytes(b *testing.B) {
- client := benchmarkRedisClient(10)
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
defer client.Close()
value := bytes.Repeat([]byte{'1'}, 10000)
@@ -128,11 +139,11 @@ func BenchmarkRedisSetGetBytes(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := client.Set("key", value, 0).Err(); err != nil {
+ if err := client.Set(ctx, "key", value, 0).Err(); err != nil {
b.Fatal(err)
}
- got, err := client.Get("key").Bytes()
+ got, err := client.Get(ctx, "key").Bytes()
if err != nil {
b.Fatal(err)
}
@@ -144,10 +155,11 @@ func BenchmarkRedisSetGetBytes(b *testing.B) {
}
func BenchmarkRedisMGet(b *testing.B) {
- client := benchmarkRedisClient(10)
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
defer client.Close()
- if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil {
+ if err := client.MSet(ctx, "key1", "hello1", "key2", "hello2").Err(); err != nil {
b.Fatal(err)
}
@@ -155,7 +167,7 @@ func BenchmarkRedisMGet(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := client.MGet("key1", "key2").Err(); err != nil {
+ if err := client.MGet(ctx, "key1", "key2").Err(); err != nil {
b.Fatal(err)
}
}
@@ -163,17 +175,18 @@ func BenchmarkRedisMGet(b *testing.B) {
}
func BenchmarkSetExpire(b *testing.B) {
- client := benchmarkRedisClient(10)
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := client.Set("key", "hello", 0).Err(); err != nil {
+ if err := client.Set(ctx, "key", "hello", 0).Err(); err != nil {
b.Fatal(err)
}
- if err := client.Expire("key", time.Second).Err(); err != nil {
+ if err := client.Expire(ctx, "key", time.Second).Err(); err != nil {
b.Fatal(err)
}
}
@@ -181,16 +194,17 @@ func BenchmarkSetExpire(b *testing.B) {
}
func BenchmarkPipeline(b *testing.B) {
- client := benchmarkRedisClient(10)
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
- pipe.Set("key", "hello", 0)
- pipe.Expire("key", time.Second)
+ _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, "key", "hello", 0)
+ pipe.Expire(ctx, "key", time.Second)
return nil
})
if err != nil {
@@ -201,16 +215,223 @@ func BenchmarkPipeline(b *testing.B) {
}
func BenchmarkZAdd(b *testing.B) {
- client := benchmarkRedisClient(10)
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := client.ZAdd("key", redis.Z{float64(1), "hello"}).Err(); err != nil {
+ err := client.ZAdd(ctx, "key", redis.Z{
+ Score: float64(1),
+ Member: "hello",
+ }).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkXRead(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ args := redis.XAddArgs{
+ Stream: "1",
+ ID: "*",
+ Values: map[string]string{"uno": "dos"},
+ }
+
+ lenStreams := 16
+ streams := make([]string, 0, lenStreams)
+ for i := 0; i < lenStreams; i++ {
+ streams = append(streams, strconv.Itoa(i))
+ }
+ for i := 0; i < lenStreams; i++ {
+ streams = append(streams, "0")
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ client.XAdd(ctx, &args)
+
+ err := client.XRead(ctx, &redis.XReadArgs{
+ Streams: streams,
+ Count: 1,
+ Block: time.Second,
+ }).Err()
+ if err != nil {
b.Fatal(err)
}
}
})
}
+
+//------------------------------------------------------------------------------
+
+func newClusterScenario() *clusterScenario {
+ return &clusterScenario{
+ ports: []string{"16600", "16601", "16602", "16603", "16604", "16605"},
+ nodeIDs: make([]string, 6),
+ clients: make(map[string]*redis.Client, 6),
+ }
+}
+
+var clusterBench *clusterScenario
+
+func BenchmarkClusterPing(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ ctx := context.Background()
+ if clusterBench == nil {
+ clusterBench = newClusterScenario()
+ if err := configureClusterTopology(ctx, clusterBench); err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ client := clusterBench.newClusterClient(ctx, redisClusterOptions())
+ defer client.Close()
+
+ b.Run("cluster ping", func(b *testing.B) {
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Ping(ctx).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+}
+
+func BenchmarkClusterDoInt(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ ctx := context.Background()
+ if clusterBench == nil {
+ clusterBench = newClusterScenario()
+ if err := configureClusterTopology(ctx, clusterBench); err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ client := clusterBench.newClusterClient(ctx, redisClusterOptions())
+ defer client.Close()
+
+ b.Run("cluster do set int", func(b *testing.B) {
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Do(ctx, "SET", 10, 10).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+}
+
+func BenchmarkClusterSetString(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ ctx := context.Background()
+ if clusterBench == nil {
+ clusterBench = newClusterScenario()
+ if err := configureClusterTopology(ctx, clusterBench); err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ client := clusterBench.newClusterClient(ctx, redisClusterOptions())
+ defer client.Close()
+
+ value := string(bytes.Repeat([]byte{'1'}, 10000))
+
+ b.Run("cluster set string", func(b *testing.B) {
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Set(ctx, "key", value, 0).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+}
+
+func BenchmarkExecRingSetAddrsCmd(b *testing.B) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+ )
+
+ ring := redis.NewRing(&redis.RingOptions{
+ Addrs: map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ },
+ NewClient: func(opt *redis.Options) *redis.Client {
+ // Simulate slow shard creation
+ time.Sleep(100 * time.Millisecond)
+ return redis.NewClient(opt)
+ },
+ })
+ defer ring.Close()
+
+ if _, err := ring.Ping(context.Background()).Result(); err != nil {
+ b.Fatal(err)
+ }
+
+ // Continuously update addresses by adding and removing one address
+ updatesDone := make(chan struct{})
+ defer func() { close(updatesDone) }()
+ go func() {
+ ticker := time.NewTicker(10 * time.Millisecond)
+ defer ticker.Stop()
+ for i := 0; ; i++ {
+ select {
+ case <-ticker.C:
+ if i%2 == 0 {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ })
+ } else {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ ringShard2Name: ":" + ringShard2Port,
+ })
+ }
+ case <-updatesDone:
+ return
+ }
+ }
+ }()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := ring.Ping(context.Background()).Result(); err != nil {
+ if err == redis.ErrClosed {
+ // The shard client could be closed while ping command is in progress
+ continue
+ } else {
+ b.Fatal(err)
+ }
+ }
+ }
+}
diff --git a/bitmap_commands.go b/bitmap_commands.go
new file mode 100644
index 0000000000..4dbc862a19
--- /dev/null
+++ b/bitmap_commands.go
@@ -0,0 +1,193 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type BitMapCmdable interface {
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpDiff(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpDiff1(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpAndOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOne(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd
+ BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+ BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+}
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+ Unit string // BYTE(default) | BIT
+}
+
+const BitCountIndexByte string = "BYTE"
+const BitCountIndexBit string = "BIT"
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := make([]any, 2, 5)
+ args[0] = "bitcount"
+ args[1] = key
+ if bitCount != nil {
+ args = append(args, bitCount.Start, bitCount.End)
+ if bitCount.Unit != "" {
+ if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit {
+ cmd := NewIntCmd(ctx)
+ cmd.SetErr(errors.New("redis: invalid bitcount index"))
+ return cmd
+ }
+ args = append(args, bitCount.Unit)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitOpAnd creates a new bitmap in which users are members of all given bitmaps
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+// BitOpOr creates a new bitmap in which users are member of at least one given bitmap
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+// BitOpXor creates a new bitmap in which users are the result of XORing all given bitmaps
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+// BitOpNot creates a new bitmap in which users are not members of a given bitmap
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+// BitOpDiff creates a new bitmap in which users are members of bitmap X but not of any of bitmaps Y1, Y2, …
+// Introduced with Redis 8.2
+func (c cmdable) BitOpDiff(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "diff", destKey, keys...)
+}
+
+// BitOpDiff1 creates a new bitmap in which users are members of one or more of bitmaps Y1, Y2, … but not members of bitmap X
+// Introduced with Redis 8.2
+func (c cmdable) BitOpDiff1(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "diff1", destKey, keys...)
+}
+
+// BitOpAndOr creates a new bitmap in which users are members of bitmap X and also members of one or more of bitmaps Y1, Y2, …
+// Introduced with Redis 8.2
+func (c cmdable) BitOpAndOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "andor", destKey, keys...)
+}
+
+// BitOpOne creates a new bitmap in which users are members of exactly one of the given bitmaps
+// Introduced with Redis 8.2
+func (c cmdable) BitOpOne(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "one", destKey, keys...)
+}
+
+// BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end
+// if you need the `byte | bit` parameter, please use `BitPosSpan`.
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitPosSpan supports the `byte | bit` parameters in redis version 7.0,
+// the bitpos command defaults to using byte type for the `start-end` range,
+// which means it counts in bytes from start to end. you can set the value
+// of "span" to determine the type of `start-end`.
+// span = "bit", cmd: bitpos key bit start end bit
+// span = "byte", cmd: bitpos key bit start end byte
+func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd {
+ cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitField accepts multiple values:
+// - BitField("set", "i1", "offset1", "value1","cmd2", "type2", "offset2", "value2")
+// - BitField([]string{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+// - BitField([]interface{}{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+func (c cmdable) BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "bitfield"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitFieldRO - Read-only variant of the BITFIELD command.
+// It is like the original BITFIELD but only accepts GET subcommand and can safely be used in read-only replicas.
+// - BitFieldRO(ctx, key, "<Encoding0>", "<Offset0>", "<Encoding1>","<Offset1>")
+func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "BITFIELD_RO"
+ args[1] = key
+ if len(values)%2 != 0 {
+ panic("BitFieldRO: invalid number of arguments, must be even")
+ }
+ for i := 0; i < len(values); i += 2 {
+ args = append(args, "GET", values[i], values[i+1])
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/bitmap_commands_test.go b/bitmap_commands_test.go
new file mode 100644
index 0000000000..f3cc3205f2
--- /dev/null
+++ b/bitmap_commands_test.go
@@ -0,0 +1,98 @@
+package redis_test
+
+import (
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+ "github.com/redis/go-redis/v9"
+)
+
+type bitCountExpected struct {
+ Start int64
+ End int64
+ Expected int64
+}
+
+var _ = Describe("BitCountBite", func() {
+ var client *redis.Client
+ key := "bit_count_test"
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ values := []int{0, 1, 0, 0, 1, 0, 1, 0, 1, 1}
+ for i, v := range values {
+ cmd := client.SetBit(ctx, key, int64(i), v)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("bit count bite", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 0},
+ {0, 1, 1},
+ {0, 2, 1},
+ {0, 3, 1},
+ {0, 4, 2},
+ {0, 5, 2},
+ {0, 6, 3},
+ {0, 7, 3},
+ {0, 8, 4},
+ {0, 9, 5},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexBit})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+})
+
+var _ = Describe("BitCountByte", func() {
+ var client *redis.Client
+ key := "bit_count_test"
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ values := []int{0, 0, 0, 0, 0, 0, 0, 1, 1, 1}
+ for i, v := range values {
+ cmd := client.SetBit(ctx, key, int64(i), v)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("bit count byte", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 1},
+ {0, 1, 3},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexByte})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+
+ It("bit count byte with no unit specified", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 1},
+ {0, 1, 3},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+})
diff --git a/cluster.go b/cluster.go
deleted file mode 100644
index c81fc1d57a..0000000000
--- a/cluster.go
+++ /dev/null
@@ -1,1242 +0,0 @@
-package redis
-
-import (
- "fmt"
- "math/rand"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/internal"
- "github.com/go-redis/redis/internal/hashtag"
- "github.com/go-redis/redis/internal/pool"
- "github.com/go-redis/redis/internal/proto"
-)
-
-var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
-var errNilClusterState = fmt.Errorf("redis: cannot load cluster slots")
-
-// ClusterOptions are used to configure a cluster client and should be
-// passed to NewClusterClient.
-type ClusterOptions struct {
- // A seed list of host:port addresses of cluster nodes.
- Addrs []string
-
- // The maximum number of retries before giving up. Command is retried
- // on network errors and MOVED/ASK redirects.
- // Default is 16.
- MaxRedirects int
-
- // Enables read-only commands on slave nodes.
- ReadOnly bool
- // Allows routing read-only commands to the closest master or slave node.
- RouteByLatency bool
-
- // Following options are copied from Options struct.
-
- OnConnect func(*Conn) error
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
- Password string
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolSize applies per cluster node and not for the whole cluster.
- PoolSize int
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-}
-
-func (opt *ClusterOptions) init() {
- if opt.MaxRedirects == -1 {
- opt.MaxRedirects = 0
- } else if opt.MaxRedirects == 0 {
- opt.MaxRedirects = 16
- }
-
- if opt.RouteByLatency {
- opt.ReadOnly = true
- }
-
- switch opt.ReadTimeout {
- case -1:
- opt.ReadTimeout = 0
- case 0:
- opt.ReadTimeout = 3 * time.Second
- }
- switch opt.WriteTimeout {
- case -1:
- opt.WriteTimeout = 0
- case 0:
- opt.WriteTimeout = opt.ReadTimeout
- }
-
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-}
-
-func (opt *ClusterOptions) clientOptions() *Options {
- const disableIdleCheck = -1
-
- return &Options{
- OnConnect: opt.OnConnect,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
- Password: opt.Password,
- readOnly: opt.ReadOnly,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
-
- IdleCheckFrequency: disableIdleCheck,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type clusterNode struct {
- Client *Client
- Latency time.Duration
-
- loading time.Time
- generation uint32
-}
-
-func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
- opt := clOpt.clientOptions()
- opt.Addr = addr
- node := clusterNode{
- Client: NewClient(opt),
- }
-
- if clOpt.RouteByLatency {
- node.updateLatency()
- }
-
- return &node
-}
-
-func (n *clusterNode) updateLatency() {
- const probes = 10
- for i := 0; i < probes; i++ {
- start := time.Now()
- n.Client.Ping()
- n.Latency += time.Since(start)
- }
- n.Latency = n.Latency / probes
-}
-
-func (n *clusterNode) Loading() bool {
- return !n.loading.IsZero() && time.Since(n.loading) < time.Minute
-}
-
-func (n *clusterNode) Generation() uint32 {
- return n.generation
-}
-
-func (n *clusterNode) SetGeneration(gen uint32) {
- if gen < n.generation {
- panic("gen < n.generation")
- }
- n.generation = gen
-}
-
-//------------------------------------------------------------------------------
-
-type clusterNodes struct {
- opt *ClusterOptions
-
- mu sync.RWMutex
- addrs []string
- nodes map[string]*clusterNode
- closed bool
-
- generation uint32
-}
-
-func newClusterNodes(opt *ClusterOptions) *clusterNodes {
- return &clusterNodes{
- opt: opt,
- nodes: make(map[string]*clusterNode),
- }
-}
-
-func (c *clusterNodes) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil
- }
- c.closed = true
-
- var firstErr error
- for _, node := range c.nodes {
- if err := node.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- c.addrs = nil
- c.nodes = nil
-
- return firstErr
-}
-
-func (c *clusterNodes) Addrs() ([]string, error) {
- c.mu.RLock()
- closed := c.closed
- addrs := c.addrs
- c.mu.RUnlock()
-
- if closed {
- return nil, pool.ErrClosed
- }
- if len(addrs) == 0 {
- return nil, errClusterNoNodes
- }
- return addrs, nil
-}
-
-func (c *clusterNodes) NextGeneration() uint32 {
- c.generation++
- return c.generation
-}
-
-// GC removes unused nodes.
-func (c *clusterNodes) GC(generation uint32) error {
- var collected []*clusterNode
- c.mu.Lock()
- for i := 0; i < len(c.addrs); {
- addr := c.addrs[i]
- node := c.nodes[addr]
- if node.Generation() >= generation {
- i++
- continue
- }
-
- c.addrs = append(c.addrs[:i], c.addrs[i+1:]...)
- delete(c.nodes, addr)
- collected = append(collected, node)
- }
- c.mu.Unlock()
-
- var firstErr error
- for _, node := range collected {
- if err := node.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
-
- return firstErr
-}
-
-func (c *clusterNodes) All() ([]*clusterNode, error) {
- c.mu.RLock()
- defer c.mu.RUnlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- nodes := make([]*clusterNode, 0, len(c.nodes))
- for _, node := range c.nodes {
- nodes = append(nodes, node)
- }
- return nodes, nil
-}
-
-func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
- var node *clusterNode
- var ok bool
-
- c.mu.RLock()
- if !c.closed {
- node, ok = c.nodes[addr]
- }
- c.mu.RUnlock()
- if ok {
- return node, nil
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- node, ok = c.nodes[addr]
- if ok {
- return node, nil
- }
-
- c.addrs = append(c.addrs, addr)
- node = newClusterNode(c.opt, addr)
- c.nodes[addr] = node
- return node, nil
-}
-
-func (c *clusterNodes) Random() (*clusterNode, error) {
- addrs, err := c.Addrs()
- if err != nil {
- return nil, err
- }
-
- var nodeErr error
- for i := 0; i <= c.opt.MaxRedirects; i++ {
- n := rand.Intn(len(addrs))
- node, err := c.GetOrCreate(addrs[n])
- if err != nil {
- return nil, err
- }
-
- nodeErr = node.Client.ClusterInfo().Err()
- if nodeErr == nil {
- return node, nil
- }
- }
- return nil, nodeErr
-}
-
-//------------------------------------------------------------------------------
-
-type clusterState struct {
- nodes *clusterNodes
- masters []*clusterNode
- slaves []*clusterNode
-
- slots [][]*clusterNode
-
- generation uint32
-}
-
-func newClusterState(nodes *clusterNodes, slots []ClusterSlot, origin string) (*clusterState, error) {
- c := clusterState{
- nodes: nodes,
- generation: nodes.NextGeneration(),
-
- slots: make([][]*clusterNode, hashtag.SlotNumber),
- }
-
- isLoopbackOrigin := isLoopbackAddr(origin)
- for _, slot := range slots {
- var nodes []*clusterNode
- for i, slotNode := range slot.Nodes {
- addr := slotNode.Addr
- if !isLoopbackOrigin && isLoopbackAddr(addr) {
- addr = origin
- }
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return nil, err
- }
-
- node.SetGeneration(c.generation)
- nodes = append(nodes, node)
-
- if i == 0 {
- c.masters = appendNode(c.masters, node)
- } else {
- c.slaves = appendNode(c.slaves, node)
- }
- }
-
- for i := slot.Start; i <= slot.End; i++ {
- c.slots[i] = nodes
- }
- }
-
- return &c, nil
-}
-
-func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) > 0 {
- return nodes[0], nil
- }
- return c.nodes.Random()
-}
-
-func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- switch len(nodes) {
- case 0:
- return c.nodes.Random()
- case 1:
- return nodes[0], nil
- case 2:
- if slave := nodes[1]; !slave.Loading() {
- return slave, nil
- }
- return nodes[0], nil
- default:
- var slave *clusterNode
- for i := 0; i < 10; i++ {
- n := rand.Intn(len(nodes)-1) + 1
- slave = nodes[n]
- if !slave.Loading() {
- break
- }
- }
- return slave, nil
- }
-}
-
-func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
- const threshold = time.Millisecond
-
- nodes := c.slotNodes(slot)
- if len(nodes) == 0 {
- return c.nodes.Random()
- }
-
- var node *clusterNode
- for _, n := range nodes {
- if n.Loading() {
- continue
- }
- if node == nil || node.Latency-n.Latency > threshold {
- node = n
- }
- }
- return node, nil
-}
-
-func (c *clusterState) slotNodes(slot int) []*clusterNode {
- if slot >= 0 && slot < len(c.slots) {
- return c.slots[slot]
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-// ClusterClient is a Redis Cluster client representing a pool of zero
-// or more underlying connections. It's safe for concurrent use by
-// multiple goroutines.
-type ClusterClient struct {
- cmdable
-
- opt *ClusterOptions
- nodes *clusterNodes
- _state atomic.Value
-
- cmdsInfoOnce internal.Once
- cmdsInfo map[string]*CommandInfo
-
- // Reports whether slots reloading is in progress.
- reloading uint32
-}
-
-// NewClusterClient returns a Redis Cluster client as described in
-// http://redis.io/topics/cluster-spec.
-func NewClusterClient(opt *ClusterOptions) *ClusterClient {
- opt.init()
-
- c := &ClusterClient{
- opt: opt,
- nodes: newClusterNodes(opt),
- }
- c.setProcessor(c.Process)
-
- // Add initial nodes.
- for _, addr := range opt.Addrs {
- _, _ = c.nodes.GetOrCreate(addr)
- }
-
- // Preload cluster slots.
- for i := 0; i < 10; i++ {
- state, err := c.reloadState()
- if err == nil {
- c._state.Store(state)
- break
- }
- }
-
- if opt.IdleCheckFrequency > 0 {
- go c.reaper(opt.IdleCheckFrequency)
- }
-
- return c
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *ClusterClient) Options() *ClusterOptions {
- return c.opt
-}
-
-func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *ClusterClient) state() (*clusterState, error) {
- v := c._state.Load()
- if v != nil {
- return v.(*clusterState), nil
- }
-
- _, err := c.nodes.Addrs()
- if err != nil {
- return nil, err
- }
-
- c.lazyReloadState()
- return nil, errNilClusterState
-}
-
-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- err := c.cmdsInfoOnce.Do(func() error {
- node, err := c.nodes.Random()
- if err != nil {
- return err
- }
-
- cmdsInfo, err := node.Client.Command().Result()
- if err != nil {
- return err
- }
-
- c.cmdsInfo = cmdsInfo
- return nil
- })
- if err != nil {
- return nil
- }
- info := c.cmdsInfo[name]
- if info == nil {
- internal.Logf("info for cmd=%s not found", name)
- }
- return info
-}
-
-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
- cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
- return hashtag.Slot(firstKey)
-}
-
-func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) {
- cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
- slot := hashtag.Slot(firstKey)
-
- if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly {
- if c.opt.RouteByLatency {
- node, err := state.slotClosestNode(slot)
- return slot, node, err
- }
-
- node, err := state.slotSlaveNode(slot)
- return slot, node, err
- }
-
- node, err := state.slotMasterNode(slot)
- return slot, node, err
-}
-
-func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
- if len(keys) == 0 {
- return fmt.Errorf("redis: keys don't hash to the same slot")
- }
-
- state, err := c.state()
- if err != nil {
- return err
- }
-
- slot := hashtag.Slot(keys[0])
- for _, key := range keys[1:] {
- if hashtag.Slot(key) != slot {
- return fmt.Errorf("redis: Watch requires all keys to be in the same slot")
- }
- }
-
- node, err := state.slotMasterNode(slot)
- if err != nil {
- return err
- }
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- time.Sleep(c.retryBackoff(attempt))
- }
-
- err = node.Client.Watch(fn, keys...)
- if err == nil {
- break
- }
-
- moved, ask, addr := internal.IsMovedError(err)
- if moved || ask {
- c.lazyReloadState()
- node, err = c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
- continue
- }
-
- return err
- }
-
- return err
-}
-
-// Close closes the cluster client, releasing any open resources.
-//
-// It is rare to Close a ClusterClient, as the ClusterClient is meant
-// to be long-lived and shared between many goroutines.
-func (c *ClusterClient) Close() error {
- return c.nodes.Close()
-}
-
-func (c *ClusterClient) Process(cmd Cmder) error {
- state, err := c.state()
- if err != nil {
- cmd.setErr(err)
- return err
- }
-
- _, node, err := c.cmdSlotAndNode(state, cmd)
- if err != nil {
- cmd.setErr(err)
- return err
- }
-
- var ask bool
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- time.Sleep(c.retryBackoff(attempt))
- }
-
- if ask {
- pipe := node.Client.Pipeline()
- pipe.Process(NewCmd("ASKING"))
- pipe.Process(cmd)
- _, err = pipe.Exec()
- pipe.Close()
- ask = false
- } else {
- err = node.Client.Process(cmd)
- }
-
- // If there is no error - we are done.
- if err == nil {
- break
- }
-
- // If slave is loading - read from master.
- if c.opt.ReadOnly && internal.IsLoadingError(err) {
- // TODO: race
- node.loading = time.Now()
- continue
- }
-
- if internal.IsRetryableError(err, true) {
- var nodeErr error
- node, nodeErr = c.nodes.Random()
- if nodeErr != nil {
- break
- }
- continue
- }
-
- var moved bool
- var addr string
- moved, ask, addr = internal.IsMovedError(err)
- if moved || ask {
- c.lazyReloadState()
-
- var nodeErr error
- node, nodeErr = c.nodes.GetOrCreate(addr)
- if nodeErr != nil {
- break
- }
- continue
- }
-
- break
- }
-
- return cmd.Err()
-}
-
-// ForEachMaster concurrently calls the fn on each master node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
- state, err := c.state()
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
- for _, master := range state.masters {
- wg.Add(1)
- go func(node *clusterNode) {
- defer wg.Done()
- err := fn(node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(master)
- }
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// ForEachSlave concurrently calls the fn on each slave node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
- state, err := c.state()
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
- for _, slave := range state.slaves {
- wg.Add(1)
- go func(node *clusterNode) {
- defer wg.Done()
- err := fn(node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(slave)
- }
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// ForEachNode concurrently calls the fn on each known node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
- state, err := c.state()
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
- worker := func(node *clusterNode) {
- defer wg.Done()
- err := fn(node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }
-
- for _, node := range state.masters {
- wg.Add(1)
- go worker(node)
- }
- for _, node := range state.slaves {
- wg.Add(1)
- go worker(node)
- }
-
- wg.Wait()
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// PoolStats returns accumulated connection pool stats.
-func (c *ClusterClient) PoolStats() *PoolStats {
- var acc PoolStats
-
- state, _ := c.state()
- if state == nil {
- return &acc
- }
-
- for _, node := range state.masters {
- s := node.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
-
- acc.TotalConns += s.TotalConns
- acc.FreeConns += s.FreeConns
- acc.StaleConns += s.StaleConns
- }
-
- for _, node := range state.slaves {
- s := node.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
-
- acc.TotalConns += s.TotalConns
- acc.FreeConns += s.FreeConns
- acc.StaleConns += s.StaleConns
- }
-
- return &acc
-}
-
-func (c *ClusterClient) lazyReloadState() {
- if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
- return
- }
-
- go func() {
- defer atomic.StoreUint32(&c.reloading, 0)
-
- for {
- state, err := c.reloadState()
- if err == pool.ErrClosed {
- return
- }
-
- if err != nil {
- time.Sleep(time.Millisecond)
- continue
- }
-
- c._state.Store(state)
- time.Sleep(5 * time.Second)
- c.nodes.GC(state.generation)
- break
- }
- }()
-}
-
-// Not thread-safe.
-func (c *ClusterClient) reloadState() (*clusterState, error) {
- node, err := c.nodes.Random()
- if err != nil {
- return nil, err
- }
-
- slots, err := node.Client.ClusterSlots().Result()
- if err != nil {
- return nil, err
- }
-
- return newClusterState(c.nodes, slots, node.Client.opt.Addr)
-}
-
-// reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
- ticker := time.NewTicker(idleCheckFrequency)
- defer ticker.Stop()
-
- for range ticker.C {
- nodes, err := c.nodes.All()
- if err != nil {
- break
- }
-
- for _, node := range nodes {
- _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
- if err != nil {
- internal.Logf("ReapStaleConns failed: %s", err)
- }
- }
- }
-}
-
-func (c *ClusterClient) Pipeline() Pipeliner {
- pipe := Pipeline{
- exec: c.pipelineExec,
- }
- pipe.setProcessor(pipe.Process)
- return &pipe
-}
-
-func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(fn)
-}
-
-func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
- cmdsMap, err := c.mapCmdsByNode(cmds)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- time.Sleep(c.retryBackoff(attempt))
- }
-
- failedCmds := make(map[*clusterNode][]Cmder)
-
- for node, cmds := range cmdsMap {
- cn, _, err := node.Client.getConn()
- if err != nil {
- setCmdsErr(cmds, err)
- continue
- }
-
- err = c.pipelineProcessCmds(node, cn, cmds, failedCmds)
- if err == nil || internal.IsRedisError(err) {
- _ = node.Client.connPool.Put(cn)
- } else {
- _ = node.Client.connPool.Remove(cn)
- }
- }
-
- if len(failedCmds) == 0 {
- break
- }
- cmdsMap = failedCmds
- }
-
- return firstCmdsErr(cmds)
-}
-
-func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) {
- state, err := c.state()
- if err != nil {
- setCmdsErr(cmds, err)
- return nil, err
- }
-
- cmdsMap := make(map[*clusterNode][]Cmder)
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- node, err := state.slotMasterNode(slot)
- if err != nil {
- return nil, err
- }
- cmdsMap[node] = append(cmdsMap[node], cmd)
- }
- return cmdsMap, nil
-}
-
-func (c *ClusterClient) pipelineProcessCmds(
- node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
-) error {
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- if err := writeCmd(cn, cmds...); err != nil {
- setCmdsErr(cmds, err)
- failedCmds[node] = cmds
- return err
- }
-
- // Set read timeout for all commands.
- cn.SetReadTimeout(c.opt.ReadTimeout)
-
- return c.pipelineReadCmds(cn, cmds, failedCmds)
-}
-
-func (c *ClusterClient) pipelineReadCmds(
- cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
-) error {
- for _, cmd := range cmds {
- err := cmd.readReply(cn)
- if err == nil {
- continue
- }
-
- if c.checkMovedErr(cmd, err, failedCmds) {
- continue
- }
-
- if internal.IsRedisError(err) {
- continue
- }
-
- return err
- }
- return nil
-}
-
-func (c *ClusterClient) checkMovedErr(
- cmd Cmder, err error, failedCmds map[*clusterNode][]Cmder,
-) bool {
- moved, ask, addr := internal.IsMovedError(err)
-
- if moved {
- c.lazyReloadState()
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return false
- }
-
- failedCmds[node] = append(failedCmds[node], cmd)
- return true
- }
-
- if ask {
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return false
- }
-
- failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd)
- return true
- }
-
- return false
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *ClusterClient) TxPipeline() Pipeliner {
- pipe := Pipeline{
- exec: c.txPipelineExec,
- }
- pipe.setProcessor(pipe.Process)
- return &pipe
-}
-
-func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(fn)
-}
-
-func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
- state, err := c.state()
- if err != nil {
- return err
- }
-
- cmdsMap := c.mapCmdsBySlot(cmds)
- for slot, cmds := range cmdsMap {
- node, err := state.slotMasterNode(slot)
- if err != nil {
- setCmdsErr(cmds, err)
- continue
- }
- cmdsMap := map[*clusterNode][]Cmder{node: cmds}
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- time.Sleep(c.retryBackoff(attempt))
- }
-
- failedCmds := make(map[*clusterNode][]Cmder)
-
- for node, cmds := range cmdsMap {
- cn, _, err := node.Client.getConn()
- if err != nil {
- setCmdsErr(cmds, err)
- continue
- }
-
- err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
- if err == nil || internal.IsRedisError(err) {
- _ = node.Client.connPool.Put(cn)
- } else {
- _ = node.Client.connPool.Remove(cn)
- }
- }
-
- if len(failedCmds) == 0 {
- break
- }
- cmdsMap = failedCmds
- }
- }
-
- return firstCmdsErr(cmds)
-}
-
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
- cmdsMap := make(map[int][]Cmder)
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- cmdsMap[slot] = append(cmdsMap[slot], cmd)
- }
- return cmdsMap
-}
-
-func (c *ClusterClient) txPipelineProcessCmds(
- node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
-) error {
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- if err := txPipelineWriteMulti(cn, cmds); err != nil {
- setCmdsErr(cmds, err)
- failedCmds[node] = cmds
- return err
- }
-
- // Set read timeout for all commands.
- cn.SetReadTimeout(c.opt.ReadTimeout)
-
- if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- return pipelineReadCmds(cn, cmds)
-}
-
-func (c *ClusterClient) txPipelineReadQueued(
- cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
-) error {
- // Parse queued replies.
- var statusCmd StatusCmd
- if err := statusCmd.readReply(cn); err != nil {
- return err
- }
-
- for _, cmd := range cmds {
- err := statusCmd.readReply(cn)
- if err == nil {
- continue
- }
-
- if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) {
- continue
- }
-
- return err
- }
-
- // Parse number of replies.
- line, err := cn.Rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- err := proto.ParseErrorReply(line)
- for _, cmd := range cmds {
- if !c.checkMovedErr(cmd, err, failedCmds) {
- break
- }
- }
- return err
- case proto.ArrayReply:
- // ok
- default:
- err := fmt.Errorf("redis: expected '*', but got line %q", line)
- return err
- }
-
- return nil
-}
-
-func (c *ClusterClient) pubSub(channels []string) *PubSub {
- opt := c.opt.clientOptions()
-
- var node *clusterNode
- return &PubSub{
- opt: opt,
-
- newConn: func(channels []string) (*pool.Conn, error) {
- if node == nil {
- var slot int
- if len(channels) > 0 {
- slot = hashtag.Slot(channels[0])
- } else {
- slot = -1
- }
-
- state, err := c.state()
- if err != nil {
- return nil, err
- }
-
- masterNode, err := state.slotMasterNode(slot)
- if err != nil {
- return nil, err
- }
- node = masterNode
- }
- return node.Client.newConn()
- },
- closeConn: func(cn *pool.Conn) error {
- return node.Client.connPool.CloseConn(cn)
- },
- }
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
- pubsub := c.pubSub(channels)
- if len(channels) > 0 {
- _ = pubsub.Subscribe(channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
- pubsub := c.pubSub(channels)
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(channels...)
- }
- return pubsub
-}
-
-func isLoopbackAddr(addr string) bool {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return false
- }
-
- ip := net.ParseIP(host)
- if ip == nil {
- return false
- }
-
- return ip.IsLoopback()
-}
-
-func appendNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
- for _, n := range nodes {
- if n == node {
- return nodes
- }
- }
- return append(nodes, node)
-}
diff --git a/cluster_commands.go b/cluster_commands.go
index dff62c902d..4857b01eaa 100644
--- a/cluster_commands.go
+++ b/cluster_commands.go
@@ -1,22 +1,199 @@
package redis
-import "sync/atomic"
-
-func (c *ClusterClient) DBSize() *IntCmd {
- cmd := NewIntCmd("dbsize")
- var size int64
- err := c.ForEachMaster(func(master *Client) error {
- n, err := master.DBSize().Result()
- if err != nil {
- return err
- }
- atomic.AddInt64(&size, n)
- return nil
- })
- if err != nil {
- cmd.setErr(err)
- return cmd
+import "context"
+
+type ClusterCmdable interface {
+ ClusterMyShardID(ctx context.Context) *StringCmd
+ ClusterMyID(ctx context.Context) *StringCmd
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterShards(ctx context.Context) *ClusterShardsCmd
+ ClusterLinks(ctx context.Context) *ClusterLinksCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myshardid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMyID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterShards(ctx context.Context) *ClusterShardsCmd {
+ cmd := NewClusterShardsCmd(ctx, "cluster", "shards")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterLinks(ctx context.Context) *ClusterLinksCmd {
+ cmd := NewClusterLinksCmd(ctx, "cluster", "links")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
}
- cmd.val = size
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
return cmd
}
diff --git a/cluster_test.go b/cluster_test.go
deleted file mode 100644
index 6f3677b938..0000000000
--- a/cluster_test.go
+++ /dev/null
@@ -1,787 +0,0 @@
-package redis_test
-
-import (
- "bytes"
- "fmt"
- "net"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/go-redis/redis"
- "github.com/go-redis/redis/internal/hashtag"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-type clusterScenario struct {
- ports []string
- nodeIds []string
- processes map[string]*redisProcess
- clients map[string]*redis.Client
-}
-
-func (s *clusterScenario) masters() []*redis.Client {
- result := make([]*redis.Client, 3)
- for pos, port := range s.ports[:3] {
- result[pos] = s.clients[port]
- }
- return result
-}
-
-func (s *clusterScenario) slaves() []*redis.Client {
- result := make([]*redis.Client, 3)
- for pos, port := range s.ports[3:] {
- result[pos] = s.clients[port]
- }
- return result
-}
-
-func (s *clusterScenario) addrs() []string {
- addrs := make([]string, len(s.ports))
- for i, port := range s.ports {
- addrs[i] = net.JoinHostPort("127.0.0.1", port)
- }
- return addrs
-}
-
-func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient {
- opt.Addrs = s.addrs()
- return redis.NewClusterClient(opt)
-}
-
-func startCluster(scenario *clusterScenario) error {
- // Start processes and collect node ids
- for pos, port := range scenario.ports {
- process, err := startRedis(port, "--cluster-enabled", "yes")
- if err != nil {
- return err
- }
-
- client := redis.NewClient(&redis.Options{
- Addr: ":" + port,
- })
-
- info, err := client.ClusterNodes().Result()
- if err != nil {
- return err
- }
-
- scenario.processes[port] = process
- scenario.clients[port] = client
- scenario.nodeIds[pos] = info[:40]
- }
-
- // Meet cluster nodes.
- for _, client := range scenario.clients {
- err := client.ClusterMeet("127.0.0.1", scenario.ports[0]).Err()
- if err != nil {
- return err
- }
- }
-
- // Bootstrap masters.
- slots := []int{0, 5000, 10000, 16384}
- for pos, master := range scenario.masters() {
- err := master.ClusterAddSlotsRange(slots[pos], slots[pos+1]-1).Err()
- if err != nil {
- return err
- }
- }
-
- // Bootstrap slaves.
- for idx, slave := range scenario.slaves() {
- masterId := scenario.nodeIds[idx]
-
- // Wait until master is available
- err := eventually(func() error {
- s := slave.ClusterNodes().Val()
- wanted := masterId
- if !strings.Contains(s, wanted) {
- return fmt.Errorf("%q does not contain %q", s, wanted)
- }
- return nil
- }, 10*time.Second)
- if err != nil {
- return err
- }
-
- err = slave.ClusterReplicate(masterId).Err()
- if err != nil {
- return err
- }
- }
-
- // Wait until all nodes have consistent info.
- for _, client := range scenario.clients {
- err := eventually(func() error {
- res, err := client.ClusterSlots().Result()
- if err != nil {
- return err
- }
- wanted := []redis.ClusterSlot{
- {0, 4999, []redis.ClusterNode{{"", "127.0.0.1:8220"}, {"", "127.0.0.1:8223"}}},
- {5000, 9999, []redis.ClusterNode{{"", "127.0.0.1:8221"}, {"", "127.0.0.1:8224"}}},
- {10000, 16383, []redis.ClusterNode{{"", "127.0.0.1:8222"}, {"", "127.0.0.1:8225"}}},
- }
- return assertSlotsEqual(res, wanted)
- }, 30*time.Second)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error {
-outer_loop:
- for _, s2 := range wanted {
- for _, s1 := range slots {
- if slotEqual(s1, s2) {
- continue outer_loop
- }
- }
- return fmt.Errorf("%v not found in %v", s2, slots)
- }
- return nil
-}
-
-func slotEqual(s1, s2 redis.ClusterSlot) bool {
- if s1.Start != s2.Start {
- return false
- }
- if s1.End != s2.End {
- return false
- }
- if len(s1.Nodes) != len(s2.Nodes) {
- return false
- }
- for i, n1 := range s1.Nodes {
- if n1.Addr != s2.Nodes[i].Addr {
- return false
- }
- }
- return true
-}
-
-func stopCluster(scenario *clusterScenario) error {
- for _, client := range scenario.clients {
- if err := client.Close(); err != nil {
- return err
- }
- }
- for _, process := range scenario.processes {
- if err := process.Close(); err != nil {
- return err
- }
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-var _ = Describe("ClusterClient", func() {
- var opt *redis.ClusterOptions
- var client *redis.ClusterClient
-
- assertClusterClient := func() {
- It("should GET/SET/DEL", func() {
- val, err := client.Get("A").Result()
- Expect(err).To(Equal(redis.Nil))
- Expect(val).To(Equal(""))
-
- val, err = client.Set("A", "VALUE", 0).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(val).To(Equal("OK"))
-
- Eventually(func() string {
- return client.Get("A").Val()
- }, 30*time.Second).Should(Equal("VALUE"))
-
- cnt, err := client.Del("A").Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(cnt).To(Equal(int64(1)))
- })
-
- It("follows redirects", func() {
- Expect(client.Set("A", "VALUE", 0).Err()).NotTo(HaveOccurred())
-
- slot := hashtag.Slot("A")
- client.SwapSlotNodes(slot)
-
- Eventually(func() string {
- return client.Get("A").Val()
- }, 30*time.Second).Should(Equal("VALUE"))
- })
-
- It("distributes keys", func() {
- for i := 0; i < 100; i++ {
- err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
- Expect(err).NotTo(HaveOccurred())
- }
-
- for _, master := range cluster.masters() {
- Eventually(func() string {
- return master.Info("keyspace").Val()
- }, 30*time.Second).Should(Or(
- ContainSubstring("keys=31"),
- ContainSubstring("keys=29"),
- ContainSubstring("keys=40"),
- ))
- }
- })
-
- It("distributes keys when using EVAL", func() {
- script := redis.NewScript(`
- local r = redis.call('SET', KEYS[1], ARGV[1])
- return r
- `)
-
- var key string
- for i := 0; i < 100; i++ {
- key = fmt.Sprintf("key%d", i)
- err := script.Run(client, []string{key}, "value").Err()
- Expect(err).NotTo(HaveOccurred())
- }
-
- for _, master := range cluster.masters() {
- Eventually(func() string {
- return master.Info("keyspace").Val()
- }, 30*time.Second).Should(Or(
- ContainSubstring("keys=31"),
- ContainSubstring("keys=29"),
- ContainSubstring("keys=40"),
- ))
- }
- })
-
- It("supports Watch", func() {
- var incr func(string) error
-
- // Transactionally increments key using GET and SET commands.
- incr = func(key string) error {
- err := client.Watch(func(tx *redis.Tx) error {
- n, err := tx.Get(key).Int64()
- if err != nil && err != redis.Nil {
- return err
- }
-
- _, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
- pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
- return nil
- })
- return err
- }, key)
- if err == redis.TxFailedErr {
- return incr(key)
- }
- return err
- }
-
- var wg sync.WaitGroup
- for i := 0; i < 100; i++ {
- wg.Add(1)
- go func() {
- defer GinkgoRecover()
- defer wg.Done()
-
- err := incr("key")
- Expect(err).NotTo(HaveOccurred())
- }()
- }
- wg.Wait()
-
- n, err := client.Get("key").Int64()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(100)))
- })
-
- Describe("pipelining", func() {
- var pipe *redis.Pipeline
-
- assertPipeline := func() {
- keys := []string{"A", "B", "C", "D", "E", "F", "G"}
-
- It("follows redirects", func() {
- for _, key := range keys {
- slot := hashtag.Slot(key)
- client.SwapSlotNodes(slot)
- }
-
- for i, key := range keys {
- pipe.Set(key, key+"_value", 0)
- pipe.Expire(key, time.Duration(i+1)*time.Hour)
- }
- cmds, err := pipe.Exec()
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(14))
-
- for _, key := range keys {
- slot := hashtag.Slot(key)
- client.SwapSlotNodes(slot)
- }
-
- for _, key := range keys {
- pipe.Get(key)
- pipe.TTL(key)
- }
- cmds, err = pipe.Exec()
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(14))
-
- for i, key := range keys {
- get := cmds[i*2].(*redis.StringCmd)
- Expect(get.Val()).To(Equal(key + "_value"))
-
- ttl := cmds[(i*2)+1].(*redis.DurationCmd)
- dur := time.Duration(i+1) * time.Hour
- Expect(ttl.Val()).To(BeNumerically("~", dur, 5*time.Second))
- }
- })
-
- It("works with missing keys", func() {
- pipe.Set("A", "A_value", 0)
- pipe.Set("C", "C_value", 0)
- _, err := pipe.Exec()
- Expect(err).NotTo(HaveOccurred())
-
- a := pipe.Get("A")
- b := pipe.Get("B")
- c := pipe.Get("C")
- cmds, err := pipe.Exec()
- Expect(err).To(Equal(redis.Nil))
- Expect(cmds).To(HaveLen(3))
-
- Expect(a.Err()).NotTo(HaveOccurred())
- Expect(a.Val()).To(Equal("A_value"))
-
- Expect(b.Err()).To(Equal(redis.Nil))
- Expect(b.Val()).To(Equal(""))
-
- Expect(c.Err()).NotTo(HaveOccurred())
- Expect(c.Val()).To(Equal("C_value"))
- })
- }
-
- Describe("with Pipeline", func() {
- BeforeEach(func() {
- pipe = client.Pipeline().(*redis.Pipeline)
- })
-
- AfterEach(func() {
- Expect(pipe.Close()).NotTo(HaveOccurred())
- })
-
- assertPipeline()
- })
-
- Describe("with TxPipeline", func() {
- BeforeEach(func() {
- pipe = client.TxPipeline().(*redis.Pipeline)
- })
-
- AfterEach(func() {
- Expect(pipe.Close()).NotTo(HaveOccurred())
- })
-
- assertPipeline()
- })
- })
-
- It("supports PubSub", func() {
- pubsub := client.Subscribe("mychannel")
- defer pubsub.Close()
-
- Eventually(func() error {
- _, err := client.Publish("mychannel", "hello").Result()
- if err != nil {
- return err
- }
-
- msg, err := pubsub.ReceiveTimeout(time.Second)
- if err != nil {
- return err
- }
-
- _, ok := msg.(*redis.Message)
- if !ok {
- return fmt.Errorf("got %T, wanted *redis.Message", msg)
- }
-
- return nil
- }, 30*time.Second).ShouldNot(HaveOccurred())
- })
- }
-
- Describe("ClusterClient", func() {
- BeforeEach(func() {
- opt = redisClusterOptions()
- client = cluster.clusterClient(opt)
-
- _ = client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
- })
-
- AfterEach(func() {
- _ = client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
- Expect(client.Close()).NotTo(HaveOccurred())
- })
-
- It("returns pool stats", func() {
- Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
- })
-
- It("removes idle connections", func() {
- stats := client.PoolStats()
- Expect(stats.TotalConns).NotTo(BeZero())
- Expect(stats.FreeConns).NotTo(BeZero())
-
- time.Sleep(2 * time.Second)
-
- stats = client.PoolStats()
- Expect(stats.TotalConns).To(BeZero())
- Expect(stats.FreeConns).To(BeZero())
- })
-
- It("returns an error when there are no attempts left", func() {
- opt := redisClusterOptions()
- opt.MaxRedirects = -1
- client := cluster.clusterClient(opt)
-
- slot := hashtag.Slot("A")
- client.SwapSlotNodes(slot)
-
- err := client.Get("A").Err()
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("MOVED"))
-
- Expect(client.Close()).NotTo(HaveOccurred())
- })
-
- It("calls fn for every master node", func() {
- for i := 0; i < 10; i++ {
- Expect(client.Set(strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred())
- }
-
- err := client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
- Expect(err).NotTo(HaveOccurred())
-
- size, err := client.DBSize().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(size).To(Equal(int64(0)))
- })
-
- It("should CLUSTER SLOTS", func() {
- res, err := client.ClusterSlots().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(res).To(HaveLen(3))
-
- wanted := []redis.ClusterSlot{
- {0, 4999, []redis.ClusterNode{{"", "127.0.0.1:8220"}, {"", "127.0.0.1:8223"}}},
- {5000, 9999, []redis.ClusterNode{{"", "127.0.0.1:8221"}, {"", "127.0.0.1:8224"}}},
- {10000, 16383, []redis.ClusterNode{{"", "127.0.0.1:8222"}, {"", "127.0.0.1:8225"}}},
- }
- Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred())
- })
-
- It("should CLUSTER NODES", func() {
- res, err := client.ClusterNodes().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(len(res)).To(BeNumerically(">", 400))
- })
-
- It("should CLUSTER INFO", func() {
- res, err := client.ClusterInfo().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(res).To(ContainSubstring("cluster_known_nodes:6"))
- })
-
- It("should CLUSTER KEYSLOT", func() {
- hashSlot, err := client.ClusterKeySlot("somekey").Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey"))))
- })
-
- It("should CLUSTER COUNT-FAILURE-REPORTS", func() {
- n, err := client.ClusterCountFailureReports(cluster.nodeIds[0]).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(0)))
- })
-
- It("should CLUSTER COUNTKEYSINSLOT", func() {
- n, err := client.ClusterCountKeysInSlot(10).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(0)))
- })
-
- It("should CLUSTER SAVECONFIG", func() {
- res, err := client.ClusterSaveConfig().Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(res).To(Equal("OK"))
- })
-
- It("should CLUSTER SLAVES", func() {
- nodesList, err := client.ClusterSlaves(cluster.nodeIds[0]).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(nodesList).Should(ContainElement(ContainSubstring("slave")))
- Expect(nodesList).Should(HaveLen(1))
- })
-
- assertClusterClient()
- })
-
- Describe("ClusterClient failover", func() {
- BeforeEach(func() {
- opt = redisClusterOptions()
- client = cluster.clusterClient(opt)
-
- _ = client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
-
- _ = client.ForEachSlave(func(slave *redis.Client) error {
- Eventually(func() int64 {
- return client.DBSize().Val()
- }, 30*time.Second).Should(Equal(int64(0)))
- return slave.ClusterFailover().Err()
- })
- })
-
- AfterEach(func() {
- _ = client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
- Expect(client.Close()).NotTo(HaveOccurred())
- })
-
- assertClusterClient()
- })
-
- Describe("ClusterClient with RouteByLatency", func() {
- BeforeEach(func() {
- opt = redisClusterOptions()
- opt.RouteByLatency = true
- client = cluster.clusterClient(opt)
-
- _ = client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
-
- _ = client.ForEachSlave(func(slave *redis.Client) error {
- Eventually(func() int64 {
- return client.DBSize().Val()
- }, 30*time.Second).Should(Equal(int64(0)))
- return nil
- })
- })
-
- AfterEach(func() {
- _ = client.ForEachMaster(func(master *redis.Client) error {
- return master.FlushDB().Err()
- })
- Expect(client.Close()).NotTo(HaveOccurred())
- })
-
- assertClusterClient()
- })
-})
-
-var _ = Describe("ClusterClient without nodes", func() {
- var client *redis.ClusterClient
-
- BeforeEach(func() {
- client = redis.NewClusterClient(&redis.ClusterOptions{})
- })
-
- AfterEach(func() {
- Expect(client.Close()).NotTo(HaveOccurred())
- })
-
- It("Ping returns an error", func() {
- err := client.Ping().Err()
- Expect(err).To(MatchError("redis: cluster has no nodes"))
- })
-
- It("pipeline returns an error", func() {
- _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
- pipe.Ping()
- return nil
- })
- Expect(err).To(MatchError("redis: cluster has no nodes"))
- })
-})
-
-var _ = Describe("ClusterClient without valid nodes", func() {
- var client *redis.ClusterClient
-
- BeforeEach(func() {
- client = redis.NewClusterClient(&redis.ClusterOptions{
- Addrs: []string{redisAddr},
- })
- })
-
- AfterEach(func() {
- Expect(client.Close()).NotTo(HaveOccurred())
- })
-
- It("returns an error", func() {
- err := client.Ping().Err()
- Expect(err).To(MatchError("redis: cannot load cluster slots"))
- })
-
- It("pipeline returns an error", func() {
- _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
- pipe.Ping()
- return nil
- })
- Expect(err).To(MatchError("redis: cannot load cluster slots"))
- })
-})
-
-var _ = Describe("ClusterClient timeout", func() {
- var client *redis.ClusterClient
-
- AfterEach(func() {
- _ = client.Close()
- })
-
- testTimeout := func() {
- It("Ping timeouts", func() {
- err := client.Ping().Err()
- Expect(err).To(HaveOccurred())
- Expect(err.(net.Error).Timeout()).To(BeTrue())
- })
-
- It("Pipeline timeouts", func() {
- _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
- pipe.Ping()
- return nil
- })
- Expect(err).To(HaveOccurred())
- Expect(err.(net.Error).Timeout()).To(BeTrue())
- })
-
- It("Tx timeouts", func() {
- err := client.Watch(func(tx *redis.Tx) error {
- return tx.Ping().Err()
- }, "foo")
- Expect(err).To(HaveOccurred())
- Expect(err.(net.Error).Timeout()).To(BeTrue())
- })
-
- It("Tx Pipeline timeouts", func() {
- err := client.Watch(func(tx *redis.Tx) error {
- _, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
- pipe.Ping()
- return nil
- })
- return err
- }, "foo")
- Expect(err).To(HaveOccurred())
- Expect(err.(net.Error).Timeout()).To(BeTrue())
- })
- }
-
- const pause = time.Second
-
- Context("read/write timeout", func() {
- BeforeEach(func() {
- opt := redisClusterOptions()
- opt.ReadTimeout = 100 * time.Millisecond
- opt.WriteTimeout = 100 * time.Millisecond
- opt.MaxRedirects = 1
- client = cluster.clusterClient(opt)
-
- err := client.ForEachNode(func(client *redis.Client) error {
- return client.ClientPause(pause).Err()
- })
- Expect(err).NotTo(HaveOccurred())
- })
-
- AfterEach(func() {
- client.ForEachNode(func(client *redis.Client) error {
- Eventually(func() error {
- return client.Ping().Err()
- }, 2*pause).ShouldNot(HaveOccurred())
- return nil
- })
- })
-
- testTimeout()
- })
-})
-
-//------------------------------------------------------------------------------
-
-func BenchmarkRedisClusterPing(b *testing.B) {
- if testing.Short() {
- b.Skip("skipping in short mode")
- }
-
- cluster := &clusterScenario{
- ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
- nodeIds: make([]string, 6),
- processes: make(map[string]*redisProcess, 6),
- clients: make(map[string]*redis.Client, 6),
- }
-
- if err := startCluster(cluster); err != nil {
- b.Fatal(err)
- }
- defer stopCluster(cluster)
-
- client := cluster.clusterClient(redisClusterOptions())
- defer client.Close()
-
- b.ResetTimer()
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if err := client.Ping().Err(); err != nil {
- b.Fatal(err)
- }
- }
- })
-}
-
-func BenchmarkRedisClusterSetString(b *testing.B) {
- if testing.Short() {
- b.Skip("skipping in short mode")
- }
-
- cluster := &clusterScenario{
- ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
- nodeIds: make([]string, 6),
- processes: make(map[string]*redisProcess, 6),
- clients: make(map[string]*redis.Client, 6),
- }
-
- if err := startCluster(cluster); err != nil {
- b.Fatal(err)
- }
- defer stopCluster(cluster)
-
- client := cluster.clusterClient(redisClusterOptions())
- defer client.Close()
-
- value := string(bytes.Repeat([]byte{'1'}, 10000))
-
- b.ResetTimer()
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if err := client.Set("key", value, 0).Err(); err != nil {
- b.Fatal(err)
- }
- }
- })
-}
diff --git a/command.go b/command.go
index 601a2882da..d3fb231b5e 100644
--- a/command.go
+++ b/command.go
@@ -1,40 +1,108 @@
package redis
import (
- "bytes"
+ "bufio"
+ "context"
"fmt"
+ "net"
+ "regexp"
"strconv"
"strings"
+ "sync"
"time"
- "github.com/go-redis/redis/internal"
- "github.com/go-redis/redis/internal/pool"
- "github.com/go-redis/redis/internal/proto"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
)
+// keylessCommands contains Redis commands that have empty key specifications (9th slot empty)
+// Only includes core Redis commands, excludes FT.*, ts.*, timeseries.*, search.* and subcommands
+var keylessCommands = map[string]struct{}{
+ "acl": {},
+ "asking": {},
+ "auth": {},
+ "bgrewriteaof": {},
+ "bgsave": {},
+ "client": {},
+ "cluster": {},
+ "config": {},
+ "debug": {},
+ "discard": {},
+ "echo": {},
+ "exec": {},
+ "failover": {},
+ "function": {},
+ "hello": {},
+ "latency": {},
+ "lolwut": {},
+ "module": {},
+ "monitor": {},
+ "multi": {},
+ "pfselftest": {},
+ "ping": {},
+ "psubscribe": {},
+ "psync": {},
+ "publish": {},
+ "pubsub": {},
+ "punsubscribe": {},
+ "quit": {},
+ "readonly": {},
+ "readwrite": {},
+ "replconf": {},
+ "replicaof": {},
+ "role": {},
+ "save": {},
+ "script": {},
+ "select": {},
+ "shutdown": {},
+ "slaveof": {},
+ "slowlog": {},
+ "subscribe": {},
+ "swapdb": {},
+ "sync": {},
+ "unsubscribe": {},
+ "unwatch": {},
+}
+
type Cmder interface {
+ // command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster".
Name() string
+
+ // full command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info".
+ FullName() string
+
+ // all args of the command.
+ // e.g. "set k v ex 10" -> "[set k v ex 10]".
Args() []interface{}
- stringArg(int) string
- readReply(*pool.Conn) error
- setErr(error)
+ // String formats the request and its response as a single string.
+ // e.g. "set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v".
+ String() string
- readTimeout() *time.Duration
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+ readRawReply(rd *proto.Reader) error
+ SetErr(error)
Err() error
- fmt.Stringer
}
func setCmdsErr(cmds []Cmder, e error) {
for _, cmd := range cmds {
if cmd.Err() == nil {
- cmd.setErr(e)
+ cmd.SetErr(e)
}
}
}
-func firstCmdsErr(cmds []Cmder) error {
+func cmdsFirstErr(cmds []Cmder) error {
for _, cmd := range cmds {
if err := cmd.Err(); err != nil {
return err
@@ -43,91 +111,143 @@ func firstCmdsErr(cmds []Cmder) error {
return nil
}
-func writeCmd(cn *pool.Conn, cmds ...Cmder) error {
- cn.Wb.Reset()
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
for _, cmd := range cmds {
- if err := cn.Wb.Append(cmd.Args()); err != nil {
+ if err := writeCmd(wr, cmd); err != nil {
return err
}
}
+ return nil
+}
- _, err := cn.Write(cn.Wb.Bytes())
- return err
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
}
-func cmdString(cmd Cmder, val interface{}) string {
- var ss []string
- for _, arg := range cmd.Args() {
- ss = append(ss, fmt.Sprint(arg))
- }
- s := strings.Join(ss, " ")
- if err := cmd.Err(); err != nil {
- return s + ": " + err.Error()
- }
- if val != nil {
- switch vv := val.(type) {
- case []byte:
- return s + ": " + string(vv)
- default:
- return s + ": " + fmt.Sprint(val)
- }
+// cmdFirstKeyPos returns the position of the first key in the command's arguments.
+// If the command does not have a key, it returns 0.
+// TODO: Use the data in CommandInfo to determine the first key position.
+func cmdFirstKeyPos(cmd Cmder) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
}
- return s
-}
+ name := cmd.Name()
+
+ // first check if the command is keyless
+ if _, ok := keylessCommands[name]; ok {
+ return 0
+ }
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
- switch cmd.Name() {
- case "eval", "evalsha":
+ switch name {
+ case "eval", "evalsha", "eval_ro", "evalsha_ro":
if cmd.stringArg(2) != "0" {
return 3
- } else {
- return -1
}
+
+ return 0
case "publish":
return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+ return 1
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
}
- if info == nil {
- return -1
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
}
- return int(info.FirstKeyPos)
+
+ return util.BytesToString(b)
}
//------------------------------------------------------------------------------
type baseCmd struct {
- _args []interface{}
- err error
-
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+ rawVal interface{}
_readTimeout *time.Duration
}
var _ Cmder = (*Cmd)(nil)
-func (cmd *baseCmd) Err() error {
- return cmd.err
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
}
func (cmd *baseCmd) Args() []interface{} {
- return cmd._args
+ return cmd.args
}
func (cmd *baseCmd) stringArg(pos int) string {
- if pos < 0 || pos >= len(cmd._args) {
+ if pos < 0 || pos >= len(cmd.args) {
return ""
}
- s, _ := cmd._args[pos].(string)
- return s
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
}
-func (cmd *baseCmd) Name() string {
- if len(cmd._args) > 0 {
- // Cmd name must be lower cased.
- s := internal.ToLower(cmd.stringArg(0))
- cmd._args[0] = s
- return s
- }
- return ""
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
}
func (cmd *baseCmd) readTimeout() *time.Duration {
@@ -138,8 +258,9 @@ func (cmd *baseCmd) setReadTimeout(d time.Duration) {
cmd._readTimeout = &d
}
-func (cmd *baseCmd) setErr(e error) {
- cmd.err = e
+func (cmd *baseCmd) readRawReply(rd *proto.Reader) (err error) {
+ cmd.rawVal, err = rd.ReadReply()
+ return err
}
//------------------------------------------------------------------------------
@@ -150,12 +271,23 @@ type Cmd struct {
val interface{}
}
-func NewCmd(args ...interface{}) *Cmd {
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
return &Cmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
func (cmd *Cmd) Val() interface{} {
return cmd.val
}
@@ -164,20 +296,256 @@ func (cmd *Cmd) Result() (interface{}, error) {
return cmd.val, cmd.err
}
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
}
-func (cmd *Cmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser)
+func (cmd *Cmd) Int() (int, error) {
if cmd.err != nil {
- return cmd.err
+ return 0, cmd.err
}
- if b, ok := cmd.val.([]byte); ok {
- // Bytes must be copied, because underlying memory is reused.
- cmd.val = string(b)
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case bool:
+ return val, nil
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
}
- return nil
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply()
+ return err
}
//------------------------------------------------------------------------------
@@ -190,12 +558,19 @@ type SliceCmd struct {
var _ Cmder = (*SliceCmd)(nil)
-func NewSliceCmd(args ...interface{}) *SliceCmd {
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
return &SliceCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
func (cmd *SliceCmd) Val() []interface{} {
return cmd.val
}
@@ -208,14 +583,29 @@ func (cmd *SliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *SliceCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(sliceParser)
+// Scan scans the reply values into a destination struct. The reply keys are
+// matched to the struct fields via the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
if cmd.err != nil {
return cmd.err
}
- cmd.val = v.([]interface{})
- return nil
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadSlice()
+ return err
}
//------------------------------------------------------------------------------
@@ -228,12 +618,19 @@ type StatusCmd struct {
var _ Cmder = (*StatusCmd)(nil)
-func NewStatusCmd(args ...interface{}) *StatusCmd {
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
return &StatusCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
func (cmd *StatusCmd) Val() string {
return cmd.val
}
@@ -242,13 +639,17 @@ func (cmd *StatusCmd) Result() (string, error) {
return cmd.val, cmd.err
}
+func (cmd *StatusCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
func (cmd *StatusCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StatusCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadStringReply()
- return cmd.err
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
}
//------------------------------------------------------------------------------
@@ -261,12 +662,19 @@ type IntCmd struct {
var _ Cmder = (*IntCmd)(nil)
-func NewIntCmd(args ...interface{}) *IntCmd {
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
return &IntCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
func (cmd *IntCmd) Val() int64 {
return cmd.val
}
@@ -275,13 +683,66 @@ func (cmd *IntCmd) Result() (int64, error) {
return cmd.val, cmd.err
}
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
func (cmd *IntCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *IntCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadIntReply()
- return cmd.err
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadInt()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ return nil
}
//------------------------------------------------------------------------------
@@ -295,13 +756,20 @@ type DurationCmd struct {
var _ Cmder = (*DurationCmd)(nil)
-func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
return &DurationCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
precision: precision,
}
}
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
func (cmd *DurationCmd) Val() time.Duration {
return cmd.val
}
@@ -314,13 +782,19 @@ func (cmd *DurationCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *DurationCmd) readReply(cn *pool.Conn) error {
- var n int64
- n, cmd.err = cn.Rd.ReadIntReply()
- if cmd.err != nil {
- return cmd.err
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
}
- cmd.val = time.Duration(n) * cmd.precision
return nil
}
@@ -334,12 +808,19 @@ type TimeCmd struct {
var _ Cmder = (*TimeCmd)(nil)
-func NewTimeCmd(args ...interface{}) *TimeCmd {
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
return &TimeCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
func (cmd *TimeCmd) Val() time.Time {
return cmd.val
}
@@ -352,13 +833,19 @@ func (cmd *TimeCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *TimeCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(timeParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ second, err := rd.ReadInt()
+ if err != nil {
+ return err
}
- cmd.val = v.(time.Time)
+ microsecond, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val = time.Unix(second, microsecond*1000)
return nil
}
@@ -372,12 +859,19 @@ type BoolCmd struct {
var _ Cmder = (*BoolCmd)(nil)
-func NewBoolCmd(args ...interface{}) *BoolCmd {
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
return &BoolCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
func (cmd *BoolCmd) Val() bool {
return cmd.val
}
@@ -390,33 +884,16 @@ func (cmd *BoolCmd) String() string {
return cmdString(cmd, cmd.val)
}
-var ok = []byte("OK")
+func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadBool()
-func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadReply(nil)
// `SET key value NX` returns nil when key already exists. But
// `SETNX key value` returns bool (0/1). So convert nil to bool.
- // TODO: is this okay?
- if cmd.err == Nil {
+ if err == Nil {
cmd.val = false
- cmd.err = nil
- return nil
- }
- if cmd.err != nil {
- return cmd.err
- }
- switch v := v.(type) {
- case int64:
- cmd.val = v == 1
- return nil
- case []byte:
- cmd.val = bytes.Equal(v, ok)
- return nil
- default:
- cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
- return cmd.err
+ err = nil
}
+ return err
}
//------------------------------------------------------------------------------
@@ -424,30 +901,51 @@ func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
type StringCmd struct {
baseCmd
- val []byte
+ val string
}
var _ Cmder = (*StringCmd)(nil)
-func NewStringCmd(args ...interface{}) *StringCmd {
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
return &StringCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
func (cmd *StringCmd) Val() string {
- return internal.BytesToString(cmd.val)
+ return cmd.val
}
func (cmd *StringCmd) Result() (string, error) {
- return cmd.Val(), cmd.err
+ return cmd.val, cmd.err
}
func (cmd *StringCmd) Bytes() ([]byte, error) {
- return cmd.val, cmd.err
+ return util.StringToBytes(cmd.val), cmd.err
}
-func (cmd *StringCmd) Int64() (int64, error) {
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
if cmd.err != nil {
return 0, cmd.err
}
@@ -461,6 +959,17 @@ func (cmd *StringCmd) Uint64() (uint64, error) {
return strconv.ParseUint(cmd.Val(), 10, 64)
}
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
func (cmd *StringCmd) Float64() (float64, error) {
if cmd.err != nil {
return 0, cmd.err
@@ -468,20 +977,27 @@ func (cmd *StringCmd) Float64() (float64, error) {
return strconv.ParseFloat(cmd.Val(), 64)
}
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
func (cmd *StringCmd) Scan(val interface{}) error {
if cmd.err != nil {
return cmd.err
}
- return proto.Scan(cmd.val, val)
+ return proto.Scan([]byte(cmd.val), val)
}
func (cmd *StringCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadBytesReply()
- return cmd.err
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
}
//------------------------------------------------------------------------------
@@ -494,27 +1010,89 @@ type FloatCmd struct {
var _ Cmder = (*FloatCmd)(nil)
-func NewFloatCmd(args ...interface{}) *FloatCmd {
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
return &FloatCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
func (cmd *FloatCmd) Val() float64 {
return cmd.val
}
func (cmd *FloatCmd) Result() (float64, error) {
- return cmd.Val(), cmd.Err()
+ return cmd.val, cmd.err
}
func (cmd *FloatCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *FloatCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadFloatReply()
- return cmd.err
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloat()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloat(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil
}
//------------------------------------------------------------------------------
@@ -527,18 +1105,25 @@ type StringSliceCmd struct {
var _ Cmder = (*StringSliceCmd)(nil)
-func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
return &StringSliceCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
func (cmd *StringSliceCmd) Val() []string {
return cmd.val
}
func (cmd *StringSliceCmd) Result() ([]string, error) {
- return cmd.Val(), cmd.Err()
+ return cmd.val, cmd.err
}
func (cmd *StringSliceCmd) String() string {
@@ -549,13 +1134,116 @@ func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
return proto.ScanSlice(cmd.Val(), container)
}
-func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValue struct {
+ Key string
+ Value string
+}
+
+type KeyValueSliceCmd struct {
+ baseCmd
+
+ val []KeyValue
+}
+
+var _ Cmder = (*KeyValueSliceCmd)(nil)
+
+func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+ return &KeyValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+ cmd.val = val
+}
+
+func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+ return cmd.val
+}
+
+func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Many commands reply in one of two formats: either an array of
+// [key, value] pairs, e.g.
+//   1) 1) "one"
+//      2) (double) 1
+//   2) 1) "two"
+//      2) (double) 2
+// or a flat list alternating keys and values, e.g.
+//   1) "one"
+//   2) (double) 1
+//   3) "two"
+//   4) (double) 2
+func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n is 0 there is nothing more to read.
+ if n == 0 {
+ cmd.val = make([]KeyValue, 0)
+ return nil
}
- cmd.val = v.([]string)
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]KeyValue, n)
+ } else {
+ cmd.val = make([]KeyValue, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
return nil
}
@@ -569,12 +1257,19 @@ type BoolSliceCmd struct {
var _ Cmder = (*BoolSliceCmd)(nil)
-func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
return &BoolSliceCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
func (cmd *BoolSliceCmd) Val() []bool {
return cmd.val
}
@@ -587,89 +1282,249 @@ func (cmd *BoolSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadBool(); err != nil {
+ return err
+ }
}
- cmd.val = v.([]bool)
return nil
}
//------------------------------------------------------------------------------
-type StringStringMapCmd struct {
+type MapStringStringCmd struct {
baseCmd
val map[string]string
}
-var _ Cmder = (*StringStringMapCmd)(nil)
+var _ Cmder = (*MapStringStringCmd)(nil)
-func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
- return &StringStringMapCmd{
- baseCmd: baseCmd{_args: args},
+func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+ return &MapStringStringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
-func (cmd *StringStringMapCmd) Val() map[string]string {
+func (cmd *MapStringStringCmd) Val() map[string]string {
return cmd.val
}
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
return cmd.val, cmd.err
}
-func (cmd *StringStringMapCmd) String() string {
+func (cmd *MapStringStringCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser)
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the struct fields by the `redis:"field"` tag.
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
if cmd.err != nil {
return cmd.err
}
- cmd.val = v.(map[string]string)
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]string, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[key] = value
+ }
return nil
}
//------------------------------------------------------------------------------
-type StringIntMapCmd struct {
+type MapStringIntCmd struct {
baseCmd
val map[string]int64
}
-var _ Cmder = (*StringIntMapCmd)(nil)
+var _ Cmder = (*MapStringIntCmd)(nil)
-func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
- return &StringIntMapCmd{
- baseCmd: baseCmd{_args: args},
+func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+ return &MapStringIntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
-func (cmd *StringIntMapCmd) Val() map[string]int64 {
+func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *MapStringIntCmd) Val() map[string]int64 {
return cmd.val
}
-func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
return cmd.val, cmd.err
}
-func (cmd *StringIntMapCmd) String() string {
+func (cmd *MapStringIntCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = nn
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+type MapStringSliceInterfaceCmd struct {
+ baseCmd
+ val map[string][]interface{}
+}
+
+func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd {
+ return &MapStringSliceInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringSliceInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+ readType, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string][]interface{})
+
+ switch readType {
+ case proto.RespMap:
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[k] = make([]interface{}, nn)
+ for j := 0; j < nn; j++ {
+ value, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[k][j] = value
+ }
+ }
+ case proto.RespArray:
+ // RESP2 response
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < n; i++ {
+ // Each entry in this array is itself an array with key details
+ itemLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = make([]interface{}, 0, itemLen-1)
+ for j := 1; j < itemLen; j++ {
+ // Read the inner array for timestamp-value pairs
+ data, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = append(cmd.val[key], data)
+ }
+ }
}
- cmd.val = v.(map[string]int64)
+
return nil
}
@@ -683,12 +1538,19 @@ type StringStructMapCmd struct {
var _ Cmder = (*StringStructMapCmd)(nil)
-func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd {
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
return &StringStructMapCmd{
- baseCmd: baseCmd{_args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
func (cmd *StringStructMapCmd) Val() map[string]struct{} {
return cmd.val
}
@@ -701,324 +1563,4183 @@ func (cmd *StringStructMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringStructMapParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]struct{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = struct{}{}
}
- cmd.val = v.(map[string]struct{})
return nil
}
//------------------------------------------------------------------------------
-type ZSliceCmd struct {
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
baseCmd
- val []Z
+ val []XMessage
}
-var _ Cmder = (*ZSliceCmd)(nil)
+var _ Cmder = (*XMessageSliceCmd)(nil)
-func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
- return &ZSliceCmd{
- baseCmd: baseCmd{_args: args},
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
-func (cmd *ZSliceCmd) Val() []Z {
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
return cmd.val
}
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
return cmd.val, cmd.err
}
-func (cmd *ZSliceCmd) String() string {
+func (cmd *XMessageSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser)
- if cmd.err != nil {
- return cmd.err
- }
- cmd.val = v.([]Z)
- return nil
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = readXMessageSlice(rd)
+ return err
}
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
- baseCmd
-
- page []string
- cursor uint64
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
- process func(cmd Cmder) error
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ if msgs[i], err = readXMessage(rd); err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
}
-var _ Cmder = (*ScanCmd)(nil)
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return XMessage{}, err
+ }
-func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
- return &ScanCmd{
- baseCmd: baseCmd{_args: args},
- process: process,
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
}
-}
-func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
- return cmd.page, cmd.cursor
-}
+ v, err := stringInterfaceMapParser(rd)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ }
-func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
- return cmd.page, cmd.cursor, cmd.err
+ return XMessage{
+ ID: id,
+ Values: v,
+ }, nil
}
-func (cmd *ScanCmd) String() string {
- return cmdString(cmd, cmd.page)
-}
+func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
-func (cmd *ScanCmd) readReply(cn *pool.Conn) error {
- cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply()
- return cmd.err
-}
+ m := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
-// Iterator creates a new ScanIterator.
-func (cmd *ScanCmd) Iterator() *ScanIterator {
- return &ScanIterator{
- cmd: cmd,
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
}
+ return m, nil
}
//------------------------------------------------------------------------------
-type ClusterNode struct {
- Id string
- Addr string
-}
-
-type ClusterSlot struct {
- Start int
- End int
- Nodes []ClusterNode
+type XStream struct {
+ Stream string
+ Messages []XMessage
}
-type ClusterSlotsCmd struct {
+type XStreamSliceCmd struct {
baseCmd
- val []ClusterSlot
+ val []XStream
}
-var _ Cmder = (*ClusterSlotsCmd)(nil)
+var _ Cmder = (*XStreamSliceCmd)(nil)
-func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
- return &ClusterSlotsCmd{
- baseCmd: baseCmd{_args: args},
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
-func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
- return cmd.val
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
}
-func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
- return cmd.Val(), cmd.Err()
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ var n int
+ if typ == proto.RespMap {
+ n, err = rd.ReadMapLen()
+ } else {
+ n, err = rd.ReadArrayLen()
+ }
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if typ != proto.RespMap {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+ if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var err error
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+ cmd.val = &XPending{}
+
+ if cmd.val.Count, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val.Consumers = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XPendingExt, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+
+ if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ idle, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
+ if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+		3: // Redis 7
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+		3: // Redis 7
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle time.Duration
+ Inactive time.Duration
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for f := 0; f < nn; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ cmd.val[i].Name, err = rd.ReadString()
+ case "pending":
+ cmd.val[i].Pending, err = rd.ReadInt()
+ case "idle":
+ var idle int64
+ idle, err = rd.ReadInt()
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+ case "inactive":
+ var inactive int64
+ inactive, err = rd.ReadInt()
+ cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+ default:
+ return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+ EntriesRead int64
+ // Lag represents the number of pending messages in the stream not yet
+ // delivered to this consumer group. Returns -1 when the lag cannot be determined.
+ Lag int64
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ group := &cmd.val[i]
+
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for j := 0; j < nn; j++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "consumers":
+ group.Consumers, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "pending":
+ group.Pending, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ case "lag":
+ group.Lag, err = rd.ReadInt()
+
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ // In that case, we return -1.
+ if err != nil && err != Nil {
+ return err
+ } else if err == Nil {
+ group.Lag = -1
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ FirstEntry XMessage
+ LastEntry XMessage
+ RecordedFirstEntryID string
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = &XInfoStream{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "first-entry":
+ cmd.val.FirstEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "last-entry":
+ cmd.val.LastEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+ RecordedFirstEntryID string
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ ActiveTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ group := XInfoStreamGroup{}
+
+ for j := 0; j < nn; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "lag":
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ group.Lag, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "pel-count":
+ group.PelCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ if err != nil {
+ return nil, err
+ }
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+// readXInfoStreamConsumers parses the per-consumer section of an
+// XINFO STREAM FULL reply, including each consumer's pending entries.
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	consumers := make([]XInfoStreamConsumer, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadMapLen()
+		if err != nil {
+			return nil, err
+		}
+
+		c := XInfoStreamConsumer{}
+
+		for f := 0; f < nn; f++ {
+			cKey, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			// Cases that only assign to err rely on the shared check
+			// after the switch; cases that declare their own err must
+			// check it locally.
+			switch cKey {
+			case "name":
+				c.Name, err = rd.ReadString()
+			case "seen-time":
+				seen, err := rd.ReadInt()
+				if err != nil {
+					return nil, err
+				}
+				c.SeenTime = time.UnixMilli(seen)
+			case "active-time":
+				active, err := rd.ReadInt()
+				if err != nil {
+					return nil, err
+				}
+				c.ActiveTime = time.UnixMilli(active)
+			case "pel-count":
+				c.PelCount, err = rd.ReadInt()
+			case "pending":
+				pendingNumber, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+
+				c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+				for pn := 0; pn < pendingNumber; pn++ {
+					// Fixed 3-element entry: ID, delivery time (unix ms),
+					// delivery count.
+					if err = rd.ReadFixedArrayLen(3); err != nil {
+						return nil, err
+					}
+
+					p := XInfoStreamConsumerPending{}
+
+					p.ID, err = rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					delivery, err := rd.ReadInt()
+					if err != nil {
+						return nil, err
+					}
+					p.DeliveryTime = time.UnixMilli(delivery)
+
+					p.DeliveryCount, err = rd.ReadInt()
+					if err != nil {
+						return nil, err
+					}
+
+					c.Pending = append(c.Pending, p)
+				}
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM FULL reply", cKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+		consumers = append(consumers, c)
+	}
+
+	return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+// ZSliceCmd returns a slice of Z (member/score pairs), e.g. for
+// ZRANGE ... WITHSCORES style commands.
+type ZSliceCmd struct {
+	baseCmd
+
+	val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+// NewZSliceCmd creates a ZSliceCmd with the given context and arguments.
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+	return &ZSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+	cmd.val = val
+}
+
+// Val returns the parsed member/score pairs.
+func (cmd *ZSliceCmd) Val() []Z {
+	return cmd.val
+}
+
+// Result returns the parsed member/score pairs and any error.
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	// If the n is 0, can't continue reading.
+	if n == 0 {
+		cmd.val = make([]Z, 0)
+		return nil
+	}
+
+	// RESP3 replies with an array of [member, score] pairs; RESP2 replies
+	// with a flat array of alternating members and scores, so the pair
+	// count is n/2 there.
+	typ, err := rd.PeekReplyType()
+	if err != nil {
+		return err
+	}
+	array := typ == proto.RespArray
+
+	if array {
+		cmd.val = make([]Z, n)
+	} else {
+		cmd.val = make([]Z, n/2)
+	}
+
+	for i := 0; i < len(cmd.val); i++ {
+		if array {
+			if err = rd.ReadFixedArrayLen(2); err != nil {
+				return err
+			}
+		}
+
+		if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+			return err
+		}
+
+		if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// ZWithKeyCmd returns a single member/score pair together with the key it
+// came from, e.g. for BZPOPMIN/BZPOPMAX.
+type ZWithKeyCmd struct {
+	baseCmd
+
+	val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+// NewZWithKeyCmd creates a ZWithKeyCmd with the given context and arguments.
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+	return &ZWithKeyCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+	cmd.val = val
+}
+
+// Val returns the parsed value.
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+	return cmd.val
+}
+
+// Result returns the parsed value and any error.
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// readReply parses a fixed 3-element array: key, member, score.
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+	if err = rd.ReadFixedArrayLen(3); err != nil {
+		return err
+	}
+	cmd.val = &ZWithKey{}
+
+	if cmd.val.Key, err = rd.ReadString(); err != nil {
+		return err
+	}
+	if cmd.val.Member, err = rd.ReadString(); err != nil {
+		return err
+	}
+	if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// ScanCmd holds the result of SCAN-family commands: a page of keys and the
+// cursor to continue iteration with.
+type ScanCmd struct {
+	baseCmd
+
+	page   []string
+	cursor uint64
+
+	// process is used by ScanIterator to issue the follow-up SCAN calls.
+	process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+// NewScanCmd creates a ScanCmd bound to the given executor.
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+	return &ScanCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		process: process,
+	}
+}
+
+// SetVal sets the page of keys and the next cursor.
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+	cmd.page = page
+	cmd.cursor = cursor
+}
+
+// Val returns the page of keys and the next cursor.
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+	return cmd.page, cmd.cursor
+}
+
+// Result returns the page of keys, the next cursor, and any error.
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+	return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+	return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cursor, err := rd.ReadUint()
+ if err != nil {
+ return err
+ }
+ cmd.cursor = cursor
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.page = make([]string, n)
+
+ for i := 0; i < len(cmd.page); i++ {
+ if cmd.page[i], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Iterator creates a new ScanIterator that transparently issues further
+// SCAN calls (via cmd.process) until the cursor is exhausted.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+	return &ScanIterator{
+		cmd: cmd,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterNode describes a single node serving a slot range in a
+// CLUSTER SLOTS reply.
+type ClusterNode struct {
+	ID   string
+	Addr string
+	// NetworkingMetadata holds the optional networking metadata map
+	// (e.g. hostname) reported by newer servers.
+	NetworkingMetadata map[string]string
+}
+
+// ClusterSlot is one slot range with the nodes serving it.
+type ClusterSlot struct {
+	Start int
+	End   int
+	Nodes []ClusterNode
+}
+
+// ClusterSlotsCmd parses the reply of CLUSTER SLOTS.
+type ClusterSlotsCmd struct {
+	baseCmd
+
+	val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+// NewClusterSlotsCmd creates a ClusterSlotsCmd with the given context and arguments.
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+	return &ClusterSlotsCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+	cmd.val = val
+}
+
+// Val returns the parsed slots.
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+	return cmd.val
+}
+
+// Result returns the parsed slots and any error.
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+	return cmd.val, cmd.err
}
func (cmd *ClusterSlotsCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser)
- if cmd.err != nil {
- return cmd.err
+// readReply parses a CLUSTER SLOTS reply. Each entry is an array of at
+// least two integers (start and end slot) followed by one sub-array per
+// node: [ip, port] plus an optional node ID and an optional networking
+// metadata map on newer servers.
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	cmd.val = make([]ClusterSlot, n)
+
+	for i := 0; i < len(cmd.val); i++ {
+		n, err = rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+		if n < 2 {
+			return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+		}
+
+		start, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+
+		end, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+
+		// subtract start and end.
+		nodes := make([]ClusterNode, n-2)
+
+		for j := 0; j < len(nodes); j++ {
+			nn, err := rd.ReadArrayLen()
+			if err != nil {
+				return err
+			}
+			if nn < 2 || nn > 4 {
+				// Report nn (the node-entry length), not the enclosing
+				// slot-entry length n.
+				return fmt.Errorf("redis: got %d elements in cluster info address, expected 2, 3, or 4", nn)
+			}
+
+			ip, err := rd.ReadString()
+			if err != nil {
+				return err
+			}
+
+			port, err := rd.ReadString()
+			if err != nil {
+				return err
+			}
+
+			nodes[j].Addr = net.JoinHostPort(ip, port)
+
+			if nn >= 3 {
+				id, err := rd.ReadString()
+				if err != nil {
+					return err
+				}
+				nodes[j].ID = id
+			}
+
+			if nn >= 4 {
+				metadataLength, err := rd.ReadMapLen()
+				if err != nil {
+					return err
+				}
+
+				networkingMetadata := make(map[string]string, metadataLength)
+
+				for k := 0; k < metadataLength; k++ {
+					key, err := rd.ReadString()
+					if err != nil {
+						return err
+					}
+					value, err := rd.ReadString()
+					if err != nil {
+						return err
+					}
+					networkingMetadata[key] = value
+				}
+
+				nodes[j].NetworkingMetadata = networkingMetadata
+			}
+		}
+
+		cmd.val[i] = ClusterSlot{
+			Start: int(start),
+			End:   int(end),
+			Nodes: nodes,
+		}
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+	Name                      string
+	Longitude, Latitude, Dist float64
+	GeoHash                   int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+	Radius float64
+	// Can be m, km, ft, or mi. Default is km.
+	Unit        string
+	WithCoord   bool
+	WithDist    bool
+	WithGeoHash bool
+	Count       int
+	// Can be ASC or DESC. Default is no sort order.
+	Sort      string
+	Store     string
+	StoreDist string
+
+	// WithCoord+WithDist+WithGeoHash
+	// Filled in by geoLocationArgs and consumed by readReply to know how
+	// many extra fields each reply entry carries.
+	withLen int
+}
+
+// GeoLocationCmd parses the reply of GEORADIUS-family commands according
+// to the WITH* options recorded in the query.
+type GeoLocationCmd struct {
+	baseCmd
+
+	q         *GeoRadiusQuery
+	locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+// NewGeoLocationCmd creates a GeoLocationCmd; the query options are
+// appended to args by geoLocationArgs.
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+	return &GeoLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: geoLocationArgs(q, args...),
+		},
+		q: q,
+	}
+}
+
+// geoLocationArgs appends the GEORADIUS-style options in q to args. As a
+// side effect it records in q.withLen how many WITH* fields each reply
+// entry will carry, which readReply uses when parsing.
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+	args = append(args, q.Radius)
+	if q.Unit != "" {
+		args = append(args, q.Unit)
+	} else {
+		args = append(args, "km")
+	}
+	// Reset withLen before counting so that building args more than once
+	// from the same query is idempotent (incrementing a stale value would
+	// corrupt reply parsing).
+	q.withLen = 0
+	if q.WithCoord {
+		args = append(args, "withcoord")
+		q.withLen++
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+		q.withLen++
+	}
+	if q.WithGeoHash {
+		args = append(args, "withhash")
+		q.withLen++
+	}
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+	}
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+	if q.Store != "" {
+		args = append(args, "store")
+		args = append(args, q.Store)
+	}
+	if q.StoreDist != "" {
+		args = append(args, "storedist")
+		args = append(args, q.StoreDist)
+	}
+	return args
+}
+
+// SetVal sets the command result.
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+	cmd.locations = locations
+}
+
+// Val returns the parsed locations.
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+	return cmd.locations
+}
+
+// Result returns the parsed locations and any error.
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+	return cmdString(cmd, cmd.locations)
+}
+
+// readReply parses one entry per location. Without WITH* options each
+// entry is a bare name; otherwise it is an array of name plus the
+// requested fields in the fixed server order: dist, hash, coordinates.
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	cmd.locations = make([]GeoLocation, n)
+
+	for i := 0; i < len(cmd.locations); i++ {
+		// only name
+		if cmd.q.withLen == 0 {
+			if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// +name
+		if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+			return err
+		}
+
+		if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+			return err
+		}
+		if cmd.q.WithDist {
+			if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+				return err
+			}
+		}
+		if cmd.q.WithGeoHash {
+			if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+				return err
+			}
+		}
+		if cmd.q.WithCoord {
+			if err = rd.ReadFixedArrayLen(2); err != nil {
+				return err
+			}
+			if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+				return err
+			}
+			if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+type GeoSearchQuery struct {
+	Member string
+
+	// Latitude and Longitude when using FromLonLat option.
+	Longitude float64
+	Latitude  float64
+
+	// Distance and unit when using ByRadius option.
+	// Can use m, km, ft, or mi. Default is km.
+	Radius     float64
+	RadiusUnit string
+
+	// Height, width and unit when using ByBox option.
+	// Can be m, km, ft, or mi. Default is km.
+	BoxWidth  float64
+	BoxHeight float64
+	BoxUnit   string
+
+	// Can be ASC or DESC. Default is no sort order.
+	Sort     string
+	Count    int
+	CountAny bool
+}
+
+// GeoSearchLocationQuery extends GeoSearchQuery with the WITH* reply
+// options for GEOSEARCH.
+type GeoSearchLocationQuery struct {
+	GeoSearchQuery
+
+	WithCoord bool
+	WithDist  bool
+	WithHash  bool
+}
+
+// GeoSearchStoreQuery extends GeoSearchQuery for GEOSEARCHSTORE.
+type GeoSearchStoreQuery struct {
+	GeoSearchQuery
+
+	// When using the StoreDist option, the command stores the items in a
+	// sorted set populated with their distance from the center of the circle or box,
+	// as a floating-point number, in the same unit specified for that shape.
+	StoreDist bool
+}
+
+// geoSearchLocationArgs appends the GEOSEARCH query options in q to args,
+// followed by the requested WITH* reply flags.
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+	withFlags := []struct {
+		enabled bool
+		token   string
+	}{
+		{q.WithCoord, "withcoord"},
+		{q.WithDist, "withdist"},
+		{q.WithHash, "withhash"},
+	}
+	for _, flag := range withFlags {
+		if flag.enabled {
+			args = append(args, flag.token)
+		}
+	}
+
+	return args
+}
+
+// geoSearchArgs appends the common GEOSEARCH/GEOSEARCHSTORE options in q
+// to args. Note: it mutates q, filling in the default "km" unit when none
+// is given.
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+	// Center: an existing member takes precedence over coordinates.
+	if q.Member != "" {
+		args = append(args, "frommember", q.Member)
+	} else {
+		args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+	}
+
+	// Shape: radius takes precedence; otherwise a box (even if zero-sized).
+	if q.Radius > 0 {
+		if q.RadiusUnit == "" {
+			q.RadiusUnit = "km"
+		}
+		args = append(args, "byradius", q.Radius, q.RadiusUnit)
+	} else {
+		if q.BoxUnit == "" {
+			q.BoxUnit = "km"
+		}
+		args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+	}
+
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+		if q.CountAny {
+			args = append(args, "any")
+		}
+	}
+
+	return args
+}
+
+// GeoSearchLocationCmd parses the reply of GEOSEARCH according to the
+// WITH* options recorded in opt.
+type GeoSearchLocationCmd struct {
+	baseCmd
+
+	opt *GeoSearchLocationQuery
+	val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+// NewGeoSearchLocationCmd creates a GeoSearchLocationCmd bound to the
+// given query options.
+func NewGeoSearchLocationCmd(
+	ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+	return &GeoSearchLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		opt: opt,
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+	cmd.val = val
+}
+
+// Val returns the parsed locations.
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+	return cmd.val
+}
+
+// Result returns the parsed locations and any error.
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// readReply parses one sub-array per location: name plus the requested
+// fields in the fixed server order dist, hash, coordinates.
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]GeoLocation, n)
+	for i := 0; i < n; i++ {
+		// The inner length is implied by the WITH* options and not
+		// validated here.
+		_, err = rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+
+		var loc GeoLocation
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return err
+		}
+		if cmd.opt.WithDist {
+			loc.Dist, err = rd.ReadFloat()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithHash {
+			loc.GeoHash, err = rd.ReadInt()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithCoord {
+			if err = rd.ReadFixedArrayLen(2); err != nil {
+				return err
+			}
+			loc.Longitude, err = rd.ReadFloat()
+			if err != nil {
+				return err
+			}
+			loc.Latitude, err = rd.ReadFloat()
+			if err != nil {
+				return err
+			}
+		}
+
+		cmd.val[i] = loc
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoPos is a longitude/latitude pair returned by GEOPOS.
+type GeoPos struct {
+	Longitude, Latitude float64
+}
+
+// GeoPosCmd parses the reply of GEOPOS. Entries for missing members are
+// nil.
+type GeoPosCmd struct {
+	baseCmd
+
+	val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+// NewGeoPosCmd creates a GeoPosCmd with the given context and arguments.
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+	return &GeoPosCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+	cmd.val = val
+}
+
+// Val returns the parsed positions.
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+	return cmd.val
+}
+
+// Result returns the parsed positions and any error.
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *GeoPosCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	cmd.val = make([]*GeoPos, n)
+
+	for i := 0; i < len(cmd.val); i++ {
+		err = rd.ReadFixedArrayLen(2)
+		if err != nil {
+			// A nil reply means the member does not exist; keep a nil
+			// entry and move on.
+			if err == Nil {
+				cmd.val[i] = nil
+				continue
+			}
+			return err
+		}
+
+		longitude, err := rd.ReadFloat()
+		if err != nil {
+			return err
+		}
+		latitude, err := rd.ReadFloat()
+		if err != nil {
+			return err
+		}
+
+		cmd.val[i] = &GeoPos{
+			Longitude: longitude,
+			Latitude:  latitude,
+		}
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// CommandInfo describes a single entry of the COMMAND reply.
+type CommandInfo struct {
+	Name      string
+	Arity     int8
+	Flags     []string
+	ACLFlags  []string
+	FirstKeyPos int8
+	LastKeyPos  int8
+	StepCount   int8
+	// ReadOnly is derived from the "readonly" flag.
+	ReadOnly bool
+}
+
+// CommandsInfoCmd parses the reply of COMMAND into a map keyed by command
+// name.
+type CommandsInfoCmd struct {
+	baseCmd
+
+	val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+// NewCommandsInfoCmd creates a CommandsInfoCmd with the given context and arguments.
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+	return &CommandsInfoCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+	cmd.val = val
+}
+
+// Val returns the parsed command map.
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+	return cmd.val
+}
+
+// Result returns the parsed command map and any error.
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// readReply parses a COMMAND reply. The per-command entry length depends
+// on the server version: 6 elements (Redis 5), 7 (Redis 6, adds ACL
+// flags), or 10 (Redis 7, adds tips/key-specs/sub-commands, which are
+// discarded here).
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+	const numArgRedis5 = 6
+	const numArgRedis6 = 7
+	const numArgRedis7 = 10
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	cmd.val = make(map[string]*CommandInfo, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+
+		switch nn {
+		case numArgRedis5, numArgRedis6, numArgRedis7:
+			// ok
+		default:
+			return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+		}
+
+		cmdInfo := &CommandInfo{}
+		if cmdInfo.Name, err = rd.ReadString(); err != nil {
+			return err
+		}
+
+		arity, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+		cmdInfo.Arity = int8(arity)
+
+		flagLen, err := rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+		cmdInfo.Flags = make([]string, flagLen)
+		for f := 0; f < len(cmdInfo.Flags); f++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmdInfo.Flags[f] = ""
+			case err != nil:
+				return err
+			default:
+				// Derive ReadOnly while scanning the flags.
+				if !cmdInfo.ReadOnly && s == "readonly" {
+					cmdInfo.ReadOnly = true
+				}
+				cmdInfo.Flags[f] = s
+			}
+		}
+
+		firstKeyPos, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+		cmdInfo.FirstKeyPos = int8(firstKeyPos)
+
+		lastKeyPos, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+		cmdInfo.LastKeyPos = int8(lastKeyPos)
+
+		stepCount, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+		cmdInfo.StepCount = int8(stepCount)
+
+		if nn >= numArgRedis6 {
+			aclFlagLen, err := rd.ReadArrayLen()
+			if err != nil {
+				return err
+			}
+			cmdInfo.ACLFlags = make([]string, aclFlagLen)
+			for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+				switch s, err := rd.ReadString(); {
+				case err == Nil:
+					cmdInfo.ACLFlags[f] = ""
+				case err != nil:
+					return err
+				default:
+					cmdInfo.ACLFlags[f] = s
+				}
+			}
+		}
+
+		if nn >= numArgRedis7 {
+			// Discard tips, key specifications and sub-commands.
+			if err := rd.DiscardNext(); err != nil {
+				return err
+			}
+			if err := rd.DiscardNext(); err != nil {
+				return err
+			}
+			if err := rd.DiscardNext(); err != nil {
+				return err
+			}
+		}
+
+		cmd.val[cmdInfo.Name] = cmdInfo
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// cmdsInfoCache lazily fetches and memoizes the COMMAND map; fn is
+// invoked at most once.
+type cmdsInfoCache struct {
+	fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+	once internal.Once
+	cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+	return &cmdsInfoCache{
+		fn: fn,
+	}
+}
+
+// Get returns the cached command map, fetching it on first use.
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+	err := c.once.Do(func() error {
+		cmds, err := c.fn(ctx)
+		if err != nil {
+			return err
+		}
+
+		lowerCmds := make(map[string]*CommandInfo, len(cmds))
+
+		// Extensions have cmd names in upper case. Convert them to lower case.
+		for k, v := range cmds {
+			lowerCmds[internal.ToLower(k)] = v
+		}
+
+		c.cmds = lowerCmds
+		return nil
+	})
+	return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+// SlowLog is a single entry of a SLOWLOG GET reply.
+type SlowLog struct {
+	ID       int64
+	Time     time.Time
+	Duration time.Duration
+	Args     []string
+	// These are also optional fields emitted only by Redis 4.0 or greater:
+	// https://redis.io/commands/slowlog#output-format
+	ClientAddr string
+	ClientName string
+}
+
+// SlowLogCmd parses the reply of SLOWLOG GET.
+type SlowLogCmd struct {
+	baseCmd
+
+	val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+// NewSlowLogCmd creates a SlowLogCmd with the given context and arguments.
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+	return &SlowLogCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+	cmd.val = val
+}
+
+// Val returns the parsed slow-log entries.
+func (cmd *SlowLogCmd) Val() []SlowLog {
+	return cmd.val
+}
+
+// Result returns the parsed slow-log entries and any error.
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *SlowLogCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// readReply parses a SLOWLOG GET reply. Each entry has at least 4
+// elements (id, timestamp, duration in µs, argv); entries from Redis 4.0+
+// may append the client address and client name.
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	cmd.val = make([]SlowLog, n)
+
+	for i := 0; i < len(cmd.val); i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+		if nn < 4 {
+			return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+		}
+
+		if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+			return err
+		}
+
+		createdAt, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+		cmd.val[i].Time = time.Unix(createdAt, 0)
+
+		costs, err := rd.ReadInt()
+		if err != nil {
+			return err
+		}
+		cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
+
+		cmdLen, err := rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+		if cmdLen < 1 {
+			return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+		}
+
+		cmd.val[i].Args = make([]string, cmdLen)
+		for f := 0; f < len(cmd.val[i].Args); f++ {
+			cmd.val[i].Args[f], err = rd.ReadString()
+			if err != nil {
+				return err
+			}
+		}
+
+		if nn >= 5 {
+			if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+				return err
+			}
+		}
+
+		if nn >= 6 {
+			if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+//-----------------------------------------------------------------------
+
+// MapStringInterfaceCmd parses a map reply with arbitrary value types.
+type MapStringInterfaceCmd struct {
+	baseCmd
+
+	val map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceCmd)(nil)
+
+// NewMapStringInterfaceCmd creates a MapStringInterfaceCmd with the given
+// context and arguments.
+func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+	return &MapStringInterfaceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+	cmd.val = val
+}
+
+// Val returns the parsed map.
+func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+	return cmd.val
+}
+
+// Result returns the parsed map and any error.
+func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadMapLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make(map[string]interface{}, n)
+	for i := 0; i < n; i++ {
+		k, err := rd.ReadString()
+		if err != nil {
+			return err
+		}
+		v, err := rd.ReadReply()
+		if err != nil {
+			// Nil values and per-field server errors are stored as the
+			// map value rather than failing the whole command.
+			if err == Nil {
+				cmd.val[k] = Nil
+				continue
+			}
+			if err, ok := err.(proto.RedisError); ok {
+				cmd.val[k] = err
+				continue
+			}
+			return err
+		}
+		cmd.val[k] = v
+	}
+	return nil
+}
+
+//-----------------------------------------------------------------------
+
+// MapStringStringSliceCmd parses an array of string-to-string maps.
+type MapStringStringSliceCmd struct {
+	baseCmd
+
+	val []map[string]string
+}
+
+var _ Cmder = (*MapStringStringSliceCmd)(nil)
+
+// NewMapStringStringSliceCmd creates a MapStringStringSliceCmd with the
+// given context and arguments.
+func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+	return &MapStringStringSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+	cmd.val = val
+}
+
+// Val returns the parsed maps.
+func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+	return cmd.val
+}
+
+// Result returns the parsed maps and any error.
+func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]map[string]string, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadMapLen()
+		if err != nil {
+			return err
+		}
+		cmd.val[i] = make(map[string]string, nn)
+		for f := 0; f < nn; f++ {
+			k, err := rd.ReadString()
+			if err != nil {
+				return err
+			}
+
+			v, err := rd.ReadString()
+			if err != nil {
+				return err
+			}
+			cmd.val[i][k] = v
+		}
+	}
+	return nil
+}
+
+// -----------------------------------------------------------------------
+
+// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}.
+type MapMapStringInterfaceCmd struct {
+	baseCmd
+	val map[string]interface{}
+}
+
+// Compile-time interface check, consistent with the other command types
+// in this file.
+var _ Cmder = (*MapMapStringInterfaceCmd)(nil)
+
+// NewMapMapStringInterfaceCmd creates a MapMapStringInterfaceCmd with the
+// given context and arguments.
+func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd {
+	return &MapMapStringInterfaceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *MapMapStringInterfaceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// SetVal sets the command result.
+func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+	cmd.val = val
+}
+
+// Result returns the parsed map and any error.
+func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+// Val returns the parsed map.
+func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} {
+	return cmd.val
+}
+
+// readReply will try to parse the reply from the proto.Reader for both resp2 and resp3
+func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+	data, err := rd.ReadReply()
+	if err != nil {
+		return err
+	}
+	resultMap := map[string]interface{}{}
+
+	switch midResponse := data.(type) {
+	case map[interface{}]interface{}: // resp3 will return map
+		for k, v := range midResponse {
+			stringKey, ok := k.(string)
+			if !ok {
+				return fmt.Errorf("redis: invalid map key %#v", k)
+			}
+			resultMap[stringKey] = v
+		}
+	case []interface{}: // resp2 will return array of arrays
+		n := len(midResponse)
+		for i := 0; i < n; i++ {
+			finalArr, ok := midResponse[i].([]interface{}) // final array that we need to transform to map
+			if !ok {
+				return fmt.Errorf("redis: unexpected response %#v", data)
+			}
+			m := len(finalArr)
+			if m%2 != 0 { // since this should be map, keys should be even number
+				return fmt.Errorf("redis: unexpected response %#v", data)
+			}
+
+			for j := 0; j < m; j += 2 {
+				stringKey, ok := finalArr[j].(string) // the first one
+				if !ok {
+					// Report the offending element finalArr[j], not the
+					// outer index i.
+					return fmt.Errorf("redis: invalid map key %#v", finalArr[j])
+				}
+				resultMap[stringKey] = finalArr[j+1] // second one is value
+			}
+		}
+	default:
+		return fmt.Errorf("redis: unexpected response %#v", data)
+	}
+
+	cmd.val = resultMap
+	return nil
+}
+
+//-----------------------------------------------------------------------
+
+// MapStringInterfaceSliceCmd parses an array of maps with arbitrary
+// value types.
+type MapStringInterfaceSliceCmd struct {
+	baseCmd
+
+	val []map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
+
+// NewMapStringInterfaceSliceCmd creates a MapStringInterfaceSliceCmd with
+// the given context and arguments.
+func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+	return &MapStringInterfaceSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+	cmd.val = val
+}
+
+// Val returns the parsed maps.
+func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+	return cmd.val
+}
+
+// Result returns the parsed maps and any error.
+func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]map[string]interface{}, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadMapLen()
+		if err != nil {
+			return err
+		}
+		cmd.val[i] = make(map[string]interface{}, nn)
+		for f := 0; f < nn; f++ {
+			k, err := rd.ReadString()
+			if err != nil {
+				return err
+			}
+			v, err := rd.ReadReply()
+			if err != nil {
+				// Nil replies are tolerated; v stays nil and is stored
+				// under the key.
+				if err != Nil {
+					return err
+				}
+			}
+			cmd.val[i][k] = v
+		}
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// KeyValuesCmd parses a [key, values...] reply, e.g. for LMPOP/BLMPOP.
+type KeyValuesCmd struct {
+	baseCmd
+
+	key string
+	val []string
+}
+
+var _ Cmder = (*KeyValuesCmd)(nil)
+
+// NewKeyValuesCmd creates a KeyValuesCmd with the given context and arguments.
+func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+	return &KeyValuesCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+	cmd.key = key
+	cmd.val = val
+}
+
+// Val returns the key and its values.
+func (cmd *KeyValuesCmd) Val() (string, []string) {
+	return cmd.key, cmd.val
+}
+
+// Result returns the key, its values, and any error.
+func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+	return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *KeyValuesCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// readReply parses a fixed 2-element array: the key name followed by an
+// array of values.
+func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+	if err = rd.ReadFixedArrayLen(2); err != nil {
+		return err
+	}
+
+	cmd.key, err = rd.ReadString()
+	if err != nil {
+		return err
+	}
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	cmd.val = make([]string, n)
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = rd.ReadString()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// ZSliceWithKeyCmd parses a [key, member/score pairs] reply, e.g. for
+// ZMPOP/BZMPOP.
+type ZSliceWithKeyCmd struct {
+	baseCmd
+
+	key string
+	val []Z
+}
+
+var _ Cmder = (*ZSliceWithKeyCmd)(nil)
+
+// NewZSliceWithKeyCmd creates a ZSliceWithKeyCmd with the given context and arguments.
+func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+	return &ZSliceWithKeyCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+	cmd.key = key
+	cmd.val = val
+}
+
+// Val returns the key and its member/score pairs.
+func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+	return cmd.key, cmd.val
+}
+
+// Result returns the key, its member/score pairs, and any error.
+func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+	return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *ZSliceWithKeyCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// readReply parses a fixed 2-element array: the key name followed by the
+// member/score pairs (RESP3 pair arrays, or a flat alternating RESP2
+// array).
+func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+	if err = rd.ReadFixedArrayLen(2); err != nil {
+		return err
+	}
+
+	cmd.key, err = rd.ReadString()
+	if err != nil {
+		return err
+	}
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	typ, err := rd.PeekReplyType()
+	if err != nil {
+		return err
+	}
+	array := typ == proto.RespArray
+
+	if array {
+		cmd.val = make([]Z, n)
+	} else {
+		cmd.val = make([]Z, n/2)
+	}
+
+	for i := 0; i < len(cmd.val); i++ {
+		if array {
+			if err = rd.ReadFixedArrayLen(2); err != nil {
+				return err
+			}
+		}
+
+		if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+			return err
+		}
+
+		if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Function describes a single function inside a library, as reported by
+// FUNCTION LIST.
+type Function struct {
+	Name        string
+	Description string
+	Flags       []string
+}
+
+// Library describes a loaded function library, as reported by
+// FUNCTION LIST.
+type Library struct {
+	Name      string
+	Engine    string
+	Functions []Function
+	Code      string
+}
+
+// FunctionListCmd parses the reply of FUNCTION LIST.
+type FunctionListCmd struct {
+	baseCmd
+
+	val []Library
+}
+
+var _ Cmder = (*FunctionListCmd)(nil)
+
+// NewFunctionListCmd creates a FunctionListCmd with the given context and arguments.
+func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+	return &FunctionListCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+// SetVal sets the command result.
+func (cmd *FunctionListCmd) SetVal(val []Library) {
+	cmd.val = val
+}
+
+func (cmd *FunctionListCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// Val returns the parsed libraries.
+func (cmd *FunctionListCmd) Val() []Library {
+	return cmd.val
+}
+
+// Result returns the parsed libraries and any error.
+func (cmd *FunctionListCmd) Result() ([]Library, error) {
+	return cmd.val, cmd.err
+}
+
+// First returns the first library, or Nil when the reply is empty.
+func (cmd *FunctionListCmd) First() (*Library, error) {
+	if cmd.err != nil {
+		return nil, cmd.err
+	}
+	if len(cmd.val) > 0 {
+		return &cmd.val[0], nil
+	}
+	return nil, Nil
+}
+
+func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	libraries := make([]Library, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadMapLen()
+		if err != nil {
+			return err
+		}
+
+		library := Library{}
+		for f := 0; f < nn; f++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return err
+			}
+
+			switch key {
+			case "library_name":
+				library.Name, err = rd.ReadString()
+			case "engine":
+				library.Engine, err = rd.ReadString()
+			case "functions":
+				library.Functions, err = cmd.readFunctions(rd)
+			case "library_code":
+				// Only present with the WITHCODE option.
+				library.Code, err = rd.ReadString()
+			default:
+				return fmt.Errorf("redis: function list unexpected key %s", key)
+			}
+
+			if err != nil {
+				return err
+			}
+		}
+
+		libraries[i] = library
+	}
+	cmd.val = libraries
+	return nil
+}
+
+// readFunctions parses the per-library "functions" sub-array of a
+// FUNCTION LIST reply.
+func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	functions := make([]Function, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadMapLen()
+		if err != nil {
+			return nil, err
+		}
+
+		function := Function{}
+		for f := 0; f < nn; f++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch key {
+			case "name":
+				if function.Name, err = rd.ReadString(); err != nil {
+					return nil, err
+				}
+			case "description":
+				// A nil description is allowed and left empty.
+				if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+					return nil, err
+				}
+			case "flags":
+				// resp set
+				nx, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+
+				function.Flags = make([]string, nx)
+				for j := 0; j < nx; j++ {
+					if function.Flags[j], err = rd.ReadString(); err != nil {
+						return nil, err
+					}
+				}
+			default:
+				return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+			}
+		}
+
+		functions[i] = function
+	}
+	return functions, nil
+}
+
+// FunctionStats contains information about the scripts currently executing on the server, and the available engines
+// - Engines:
+// Statistics about the engine like number of functions and number of libraries
+// - RunningScript:
+// The script currently running on the shard we're connecting to.
+// For Redis Enterprise and Redis Cloud, this represents the
+// function with the longest running time, across all the running functions, on all shards
+// - RunningScripts
+// All scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+type FunctionStats struct {
+ Engines []Engine
+ // isRunning reports whether any script was running when the stats were taken.
+ isRunning bool
+ // rs is the "running_script" entry; valid only when isRunning is true.
+ rs RunningScript
+ // allrs mirrors "all_running_scripts" (Redis Enterprise only).
+ allrs []RunningScript
+}
+
+// Running reports whether a script was executing when FUNCTION STATS was sampled.
+func (fs *FunctionStats) Running() bool {
+ return fs.isRunning
+}
+
+// RunningScript returns the currently running script and whether one was running.
+func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+ return fs.rs, fs.isRunning
+}
+
+// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+ return fs.allrs
+}
+
+// RunningScript describes one executing script: its name, the command that
+// invoked it, and how long it has been running.
+type RunningScript struct {
+ Name string
+ Command []string
+ Duration time.Duration
+}
+
+// Engine aggregates per-engine counters reported by FUNCTION STATS.
+type Engine struct {
+ Language string
+ LibrariesCount int64
+ FunctionsCount int64
+}
+
+// FunctionStatsCmd wraps the FUNCTION STATS command and its parsed reply.
+type FunctionStatsCmd struct {
+ baseCmd
+ val FunctionStats
+}
+
+var _ Cmder = (*FunctionStatsCmd)(nil)
+
+// NewFunctionStatsCmd builds a FUNCTION STATS command bound to ctx.
+func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+ return &FunctionStatsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+ cmd.val = val
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *FunctionStatsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Val returns the parsed stats; meaningful only when the command succeeded.
+func (cmd *FunctionStatsCmd) Val() FunctionStats {
+ return cmd.val
+}
+
+// Result returns the parsed stats together with the command error, if any.
+func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+ return cmd.val, cmd.err
+}
+
+// readReply parses the FUNCTION STATS top-level map; unknown keys are a
+// protocol error.
+func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result FunctionStats
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "running_script":
+ result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+ case "engines":
+ result.Engines, err = cmd.readEngines(rd)
+ case "all_running_scripts": // Redis Enterprise only
+ result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+ default:
+ return fmt.Errorf("redis: function stats unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// readRunningScript parses one running-script map (exactly 3 entries).
+// A null reply means no script is running: (zero value, false, nil).
+func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+ err := rd.ReadFixedMapLen(3)
+ if err != nil {
+ if err == Nil {
+ return RunningScript{}, false, nil
+ }
+ return RunningScript{}, false, err
+ }
+
+ var runningScript RunningScript
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+
+ switch key {
+ case "name":
+ runningScript.Name, err = rd.ReadString()
+ case "duration_ms":
+ runningScript.Duration, err = cmd.readDuration(rd)
+ case "command":
+ runningScript.Command, err = cmd.readCommand(rd)
+ default:
+ return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+ }
+
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+ }
+
+ return runningScript, true, nil
+}
+
+// readEngines parses the "engines" map: engine name -> 2-entry stats map.
+func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ engines := make([]Engine, 0, n)
+ for i := 0; i < n; i++ {
+ engine := Engine{}
+ engine.Language, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ err = rd.ReadFixedMapLen(2)
+ if err != nil {
+ return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+ }
+
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ // NOTE(review): a key other than the two below is skipped without
+ // consuming its value, which would desync the protocol stream —
+ // confirm the server only ever sends these two keys here.
+ switch key {
+ case "libraries_count":
+ engine.LibrariesCount, err = rd.ReadInt()
+ case "functions_count":
+ engine.FunctionsCount, err = rd.ReadInt()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ engines = append(engines, engine)
+ }
+ return engines, nil
+}
+
+// readDuration reads an integer millisecond count and converts it to a Duration.
+func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+ t, err := rd.ReadInt()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+// readCommand reads the invoking command as an array of strings.
+func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ command := make([]string, 0, n)
+ for i := 0; i < n; i++ {
+ x, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ command = append(command, x)
+ }
+
+ return command, nil
+}
+
+// readRunningScripts parses "all_running_scripts" (Redis Enterprise only);
+// the bool result is true when at least one script was parsed.
+func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, false, err
+ }
+
+ runningScripts := make([]RunningScript, 0, n)
+ for i := 0; i < n; i++ {
+ rs, _, err := cmd.readRunningScript(rd)
+ if err != nil {
+ return nil, false, err
+ }
+ runningScripts = append(runningScripts, rs)
+ }
+
+ return runningScripts, len(runningScripts) > 0, nil
+}
+
+//------------------------------------------------------------------------------
+
+// LCSQuery is a parameter used for the LCS command
+type LCSQuery struct {
+ Key1 string
+ Key2 string
+ Len bool
+ Idx bool
+ MinMatchLen int
+ WithMatchLen bool
+}
+
+// LCSMatch is the result set of the LCS command.
+type LCSMatch struct {
+ MatchString string
+ Matches []LCSMatchedPosition
+ Len int64
+}
+
+// LCSMatchedPosition is one matched segment: its position in each key.
+type LCSMatchedPosition struct {
+ Key1 LCSPosition
+ Key2 LCSPosition
+
+ // only for withMatchLen is true
+ MatchLen int64
+}
+
+// LCSPosition is an inclusive start/end offset range within a key's value.
+type LCSPosition struct {
+ Start int64
+ End int64
+}
+
+// LCSCmd wraps the LCS command; readType records which reply shape to expect.
+type LCSCmd struct {
+ baseCmd
+
+ // 1: match string
+ // 2: match len
+ // 3: match idx LCSMatch
+ readType uint8
+ val *LCSMatch
+}
+
+// NewLCSCmd builds the LCS argument list from q. LEN takes precedence over
+// IDX; MINMATCHLEN and WITHMATCHLEN only apply in IDX mode.
+func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+ args := make([]interface{}, 3, 7)
+ args[0] = "lcs"
+ args[1] = q.Key1
+ args[2] = q.Key2
+
+ cmd := &LCSCmd{readType: 1}
+ if q.Len {
+ cmd.readType = 2
+ args = append(args, "len")
+ } else if q.Idx {
+ cmd.readType = 3
+ args = append(args, "idx")
+ if q.MinMatchLen != 0 {
+ args = append(args, "minmatchlen", q.MinMatchLen)
+ }
+ if q.WithMatchLen {
+ args = append(args, "withmatchlen")
+ }
+ }
+ cmd.baseCmd = baseCmd{
+ ctx: ctx,
+ args: args,
+ }
+
+ return cmd
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+ cmd.val = val
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *LCSCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Val returns the parsed match; meaningful only when the command succeeded.
+func (cmd *LCSCmd) Val() *LCSMatch {
+ return cmd.val
+}
+
+// Result returns the parsed match together with the command error, if any.
+func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+ return cmd.val, cmd.err
+}
+
+// readReply dispatches on readType: 1 = plain match string, 2 = integer
+// length, 3 = a 2-entry map with "matches" and "len".
+func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+ lcs := &LCSMatch{}
+ switch cmd.readType {
+ case 1:
+ // match string
+ if lcs.MatchString, err = rd.ReadString(); err != nil {
+ return err
+ }
+ case 2:
+ // match len
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ case 3:
+ // read LCSMatch
+ if err = rd.ReadFixedMapLen(2); err != nil {
+ return err
+ }
+
+ // read matches or len field
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "matches":
+ // read array of matched positions
+ if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+ return err
+ }
+ case "len":
+ // read match length
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ cmd.val = lcs
+ return nil
+}
+
+// readMatchedPositions parses the "matches" array. Each match is an array of
+// two positions, plus an optional length when WITHMATCHLEN was given.
+func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ positions := make([]LCSMatchedPosition, n)
+ for i := 0; i < n; i++ {
+ pn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+ if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+
+ // read match length if WithMatchLen is true
+ if pn > 2 {
+ if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return positions, nil
+}
+
+// readPosition parses a fixed [start, end] integer pair.
+func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return pos, err
+ }
+ if pos.Start, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+ if pos.End, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+
+ return pos, nil
+}
+
+// ------------------------------------------------------------------------
+
+// KeyFlags pairs a key name with its list of flags.
+type KeyFlags struct {
+ Key string
+ Flags []string
+}
+
+// KeyFlagsCmd wraps a command whose reply is an array of [key, flags] pairs.
+type KeyFlagsCmd struct {
+ baseCmd
+
+ val []KeyFlags
+}
+
+var _ Cmder = (*KeyFlagsCmd)(nil)
+
+// NewKeyFlagsCmd builds a KeyFlagsCmd bound to ctx with the raw args.
+func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+ return &KeyFlagsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+ cmd.val = val
+}
+
+// Val returns the parsed pairs; meaningful only when the command succeeded.
+func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+ return cmd.val
+}
+
+// Result returns the parsed pairs together with the command error, if any.
+func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+ return cmd.val, cmd.err
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *KeyFlagsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// readReply parses an array of 2-element arrays: [key, [flag, ...]].
+func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ cmd.val = make([]KeyFlags, 0)
+ return nil
+ }
+
+ cmd.val = make([]KeyFlags, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ flagsLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Flags = make([]string, flagsLen)
+
+ for j := 0; j < flagsLen; j++ {
+ if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ---------------------------------------------------------------------------------------------------
+
+// ClusterLink describes one peer link as reported by CLUSTER LINKS.
+type ClusterLink struct {
+ Direction string
+ Node string
+ CreateTime int64
+ Events string
+ SendBufferAllocated int64
+ SendBufferUsed int64
+}
+
+// ClusterLinksCmd wraps the CLUSTER LINKS command and its parsed reply.
+type ClusterLinksCmd struct {
+ baseCmd
+
+ val []ClusterLink
+}
+
+var _ Cmder = (*ClusterLinksCmd)(nil)
+
+// NewClusterLinksCmd builds a CLUSTER LINKS command bound to ctx.
+func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+ return &ClusterLinksCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+ cmd.val = val
+}
+
+// Val returns the parsed links; meaningful only when the command succeeded.
+func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+ return cmd.val
+}
+
+// Result returns the parsed links together with the command error, if any.
+func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+ return cmd.val, cmd.err
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *ClusterLinksCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+// readReply parses CLUSTER LINKS: an array of maps, one per link; unknown
+// keys are a protocol error.
+func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
 }
- cmd.val = v.([]ClusterSlot)
+ cmd.val = make([]ClusterLink, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "direction":
+ cmd.val[i].Direction, err = rd.ReadString()
+ case "node":
+ cmd.val[i].Node, err = rd.ReadString()
+ case "create-time":
+ cmd.val[i].CreateTime, err = rd.ReadInt()
+ case "events":
+ cmd.val[i].Events, err = rd.ReadString()
+ case "send-buffer-allocated":
+ cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+ case "send-buffer-used":
+ cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
 return nil
 }
-//------------------------------------------------------------------------------
+// ------------------------------------------------------------------------------------------------------------------
-// GeoLocation is used with GeoAdd to add geospatial location.
-type GeoLocation struct {
- Name string
- Longitude, Latitude, Dist float64
- GeoHash int64
+// SlotRange is an inclusive range of hash slots owned by a shard.
+type SlotRange struct {
+ Start int64
+ End int64
 }
-// GeoRadiusQuery is used with GeoRadius to query geospatial index.
-type GeoRadiusQuery struct {
- Radius float64
- // Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
- WithGeoHash bool
- Count int
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
- StoreDist string
+// Node describes a single cluster node as reported by CLUSTER SHARDS.
+type Node struct {
+ ID string
+ Endpoint string
+ IP string
+ Hostname string
+ Port int64
+ TLSPort int64
+ Role string
+ ReplicationOffset int64
+ Health string
 }
-type GeoLocationCmd struct {
+// ClusterShard is one shard: its slot ranges and member nodes.
+type ClusterShard struct {
+ Slots []SlotRange
+ Nodes []Node
+}
+
+// ClusterShardsCmd wraps the CLUSTER SHARDS command and its parsed reply.
+type ClusterShardsCmd struct {
 baseCmd
- q *GeoRadiusQuery
- locations []GeoLocation
+ val []ClusterShard
 }
-var _ Cmder = (*GeoLocationCmd)(nil)
+var _ Cmder = (*ClusterShardsCmd)(nil)
-func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
- args = append(args, q.Radius)
- if q.Unit != "" {
- args = append(args, q.Unit)
- } else {
- args = append(args, "km")
+// NewClusterShardsCmd builds a CLUSTER SHARDS command bound to ctx.
+func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+ return &ClusterShardsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
 }
- if q.WithCoord {
- args = append(args, "withcoord")
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+ cmd.val = val
+}
+
+// Val returns the parsed shards; meaningful only when the command succeeded.
+func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+ return cmd.val
+}
+
+// Result returns the parsed shards together with the command error, if any.
+func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+ return cmd.val, cmd.err
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *ClusterShardsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// readReply parses CLUSTER SHARDS: an array of shard maps; "slots" is a flat
+// array of start/end pairs, "nodes" an array of node maps. Unknown keys are
+// a protocol error.
+func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
 }
- if q.WithDist {
- args = append(args, "withdist")
+ cmd.val = make([]ClusterShard, n)
+
+ for i := 0; i < n; i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "slots":
+ l, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ // Slots arrive as a flat [start, end, start, end, ...] list.
+ for k := 0; k < l; k += 2 {
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+ }
+ case "nodes":
+ nodesLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Nodes = make([]Node, nodesLen)
+ for k := 0; k < nodesLen; k++ {
+ nodeMapLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for l := 0; l < nodeMapLen; l++ {
+ nodeKey, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch nodeKey {
+ case "id":
+ cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+ case "endpoint":
+ cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+ case "ip":
+ cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+ case "hostname":
+ cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+ case "port":
+ cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+ case "tls-port":
+ cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+ case "role":
+ cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+ case "replication-offset":
+ cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+ case "health":
+ cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+ }
+ }
 }
- if q.WithGeoHash {
- args = append(args, "withhash")
+
+ return nil
+}
+
+// -----------------------------------------
+
+// RankScore pairs a member's rank with its score (ZRANK/ZREVRANK WITHSCORE).
+type RankScore struct {
+ Rank int64
+ Score float64
+}
+
+// RankWithScoreCmd wraps a rank command issued with the WITHSCORE option.
+type RankWithScoreCmd struct {
+ baseCmd
+
+ val RankScore
+}
+
+var _ Cmder = (*RankWithScoreCmd)(nil)
+
+// NewRankWithScoreCmd builds the command bound to ctx with the raw args.
+func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+ return &RankWithScoreCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
 }
- if q.Count > 0 {
- args = append(args, "count", q.Count)
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+ cmd.val = val
+}
+
+// Val returns the parsed rank/score; meaningful only when the command succeeded.
+func (cmd *RankWithScoreCmd) Val() RankScore {
+ return cmd.val
+}
+
+// Result returns the parsed rank/score together with the command error, if any.
+func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+ return cmd.val, cmd.err
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *RankWithScoreCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// readReply parses a fixed [rank, score] pair.
+func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
 }
- if q.Sort != "" {
- args = append(args, q.Sort)
+
+ rank, err := rd.ReadInt()
+ if err != nil {
+ return err
 }
- if q.Store != "" {
- args = append(args, "store")
- args = append(args, q.Store)
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
 }
- if q.StoreDist != "" {
- args = append(args, "storedist")
- args = append(args, q.StoreDist)
+
+ cmd.val = RankScore{Rank: rank, Score: score}
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------------------------
+
+// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0)
+type ClientFlags uint64
+
+const (
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+ ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+ ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+ ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling
+ a command. usually this will be marked only during call()
+ however, blocked clients might have this flag kept until they
+ will try to reprocess the command. */
+ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */
+ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+)
+
+// ClientInfo is redis-server ClientInfo, not go-redis *Client
+type ClientInfo struct {
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+ Addr string // address/port of the client
+ LAddr string // address/port of local address client connected to (bind address)
+ FD int64 // file descriptor corresponding to the socket
+ Name string // the name set by the client with CLIENT SETNAME
+ Age time.Duration // total duration of the connection in seconds
+ Idle time.Duration // idle time of the connection in seconds
+ Flags ClientFlags // client flags (see below)
+ DB int // current database ID
+ Sub int // number of channel subscriptions
+ PSub int // number of pattern matching subscriptions
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+ Multi int // number of commands in a MULTI/EXEC context
+ Watch int // redis version 7.4 RC1, number of keys this client is currently watching.
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+ MultiMem int // redis version 7.0, memory is used up by buffered multi commands
+ BufferSize int // rbs, usable size of buffer
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+ OutputBufferLength int // obl, output buffer length
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+ OutputMemory int // omem, output buffer memory usage
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+ TotalNetIn int // tot-net-in, total network input
+ TotalNetOut int // tot-net-out, total network output
+ TotalCmds int // tot-cmds, total number of commands processed
+ IoThread int // io-thread id
+ Events string // file descriptor events (see below)
+ LastCmd string // cmd, last command played
+ User string // the authenticated username of the client
+ Redir int64 // client id of current client tracking redirection
+ Resp int // redis version 7.0, client RESP protocol version
+ LibName string // redis version 7.2, client library name
+ LibVer string // redis version 7.2, client library version
+}
+
+// ClientInfoCmd wraps CLIENT INFO and its parsed reply.
+type ClientInfoCmd struct {
+ baseCmd
+
+ val *ClientInfo
+}
+
+var _ Cmder = (*ClientInfoCmd)(nil)
+
+// NewClientInfoCmd builds a CLIENT INFO command bound to ctx.
+func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+ return &ClientInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
 }
- return &GeoLocationCmd{
- baseCmd: baseCmd{_args: args},
- q: q,
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+ cmd.val = val
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *ClientInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Val returns the parsed client info; meaningful only when the command succeeded.
+func (cmd *ClientInfoCmd) Val() *ClientInfo {
+ return cmd.val
+}
+
+// Result returns the parsed client info together with the command error, if any.
+func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+ return cmd.val, cmd.err
+}
+
+// readReply reads the single-line verbatim text reply and delegates parsing
+// to parseClientInfo.
+func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
 }
+
+ // sds o = catClientInfoString(sdsempty(), c);
+ // o = sdscatlen(o,"\n",1);
+ // addReplyVerbatim(c,o,sdslen(o),"txt");
+ // sdsfree(o);
+ cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+ return err
 }
-func (cmd *GeoLocationCmd) Val() []GeoLocation {
- return cmd.locations
+// fmt.Sscanf() cannot handle null values
+// parseClientInfo parses one space-separated "key=value ..." line from
+// CLIENT INFO / CLIENT LIST into a ClientInfo. Unknown keys and flag
+// characters are rejected as errors.
+// NOTE(review): strings.Split on "=" assumes values never contain '=' —
+// verify this holds for every field the server can emit.
+func parseClientInfo(txt string) (info *ClientInfo, err error) {
+ info = &ClientInfo{}
+ for _, s := range strings.Split(txt, " ") {
+ kv := strings.Split(s, "=")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+ }
+ key, val := kv[0], kv[1]
+
+ switch key {
+ case "id":
+ info.ID, err = strconv.ParseInt(val, 10, 64)
+ case "addr":
+ info.Addr = val
+ case "laddr":
+ info.LAddr = val
+ case "fd":
+ info.FD, err = strconv.ParseInt(val, 10, 64)
+ case "name":
+ info.Name = val
+ case "age":
+ var age int
+ if age, err = strconv.Atoi(val); err == nil {
+ info.Age = time.Duration(age) * time.Second
+ }
+ case "idle":
+ var idle int
+ if idle, err = strconv.Atoi(val); err == nil {
+ info.Idle = time.Duration(idle) * time.Second
+ }
+ case "flags":
+ // "N" means no flags set.
+ if val == "N" {
+ break
+ }
+
+ // Each character maps to one ClientFlags bit.
+ for i := 0; i < len(val); i++ {
+ switch val[i] {
+ case 'S':
+ info.Flags |= ClientSlave
+ case 'O':
+ info.Flags |= ClientSlave | ClientMonitor
+ case 'M':
+ info.Flags |= ClientMaster
+ case 'P':
+ info.Flags |= ClientPubSub
+ case 'x':
+ info.Flags |= ClientMulti
+ case 'b':
+ info.Flags |= ClientBlocked
+ case 't':
+ info.Flags |= ClientTracking
+ case 'R':
+ info.Flags |= ClientTrackingBrokenRedir
+ case 'B':
+ info.Flags |= ClientTrackingBCAST
+ case 'd':
+ info.Flags |= ClientDirtyCAS
+ case 'c':
+ info.Flags |= ClientCloseAfterCommand
+ case 'u':
+ info.Flags |= ClientUnBlocked
+ case 'A':
+ info.Flags |= ClientCloseASAP
+ case 'U':
+ info.Flags |= ClientUnixSocket
+ case 'r':
+ info.Flags |= ClientReadOnly
+ case 'e':
+ info.Flags |= ClientNoEvict
+ case 'T':
+ info.Flags |= ClientNoTouch
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+ }
+ }
+ case "db":
+ info.DB, err = strconv.Atoi(val)
+ case "sub":
+ info.Sub, err = strconv.Atoi(val)
+ case "psub":
+ info.PSub, err = strconv.Atoi(val)
+ case "ssub":
+ info.SSub, err = strconv.Atoi(val)
+ case "multi":
+ info.Multi, err = strconv.Atoi(val)
+ case "watch":
+ info.Watch, err = strconv.Atoi(val)
+ case "qbuf":
+ info.QueryBuf, err = strconv.Atoi(val)
+ case "qbuf-free":
+ info.QueryBufFree, err = strconv.Atoi(val)
+ case "argv-mem":
+ info.ArgvMem, err = strconv.Atoi(val)
+ case "multi-mem":
+ info.MultiMem, err = strconv.Atoi(val)
+ case "rbs":
+ info.BufferSize, err = strconv.Atoi(val)
+ case "rbp":
+ info.BufferPeak, err = strconv.Atoi(val)
+ case "obl":
+ info.OutputBufferLength, err = strconv.Atoi(val)
+ case "oll":
+ info.OutputListLength, err = strconv.Atoi(val)
+ case "omem":
+ info.OutputMemory, err = strconv.Atoi(val)
+ case "tot-mem":
+ info.TotalMemory, err = strconv.Atoi(val)
+ case "tot-net-in":
+ info.TotalNetIn, err = strconv.Atoi(val)
+ case "tot-net-out":
+ info.TotalNetOut, err = strconv.Atoi(val)
+ case "tot-cmds":
+ info.TotalCmds, err = strconv.Atoi(val)
+ case "events":
+ info.Events = val
+ case "cmd":
+ info.LastCmd = val
+ case "user":
+ info.User = val
+ case "redir":
+ info.Redir, err = strconv.ParseInt(val, 10, 64)
+ case "resp":
+ info.Resp, err = strconv.Atoi(val)
+ case "lib-name":
+ info.LibName = val
+ case "lib-ver":
+ info.LibVer = val
+ case "io-thread":
+ info.IoThread, err = strconv.Atoi(val)
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return info, nil
 }
-func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.locations, cmd.err
+// -------------------------------------------
+
+// ACLLogEntry is one record of the ACL LOG reply.
+type ACLLogEntry struct {
+ Count int64
+ Reason string
+ Context string
+ Object string
+ Username string
+ AgeSeconds float64
+ ClientInfo *ClientInfo
+ EntryID int64
+ TimestampCreated int64
+ TimestampLastUpdated int64
 }
-func (cmd *GeoLocationCmd) String() string {
- return cmdString(cmd, cmd.locations)
+// ACLLogCmd wraps ACL LOG and its parsed reply.
+type ACLLogCmd struct {
+ baseCmd
+
+ val []*ACLLogEntry
 }
-func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if cmd.err != nil {
- return cmd.err
+var _ Cmder = (*ACLLogCmd)(nil)
+
+// NewACLLogCmd builds an ACL LOG command bound to ctx.
+func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+ return &ACLLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+// SetVal replaces the parsed result with val.
+func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+ cmd.val = val
+}
+
+// Val returns the parsed entries; meaningful only when the command succeeded.
+func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+ return cmd.val
+}
+
+// Result returns the parsed entries together with the command error, if any.
+func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+ return cmd.val, cmd.err
+}
+
+// String renders the command and its parsed value for debugging.
+func (cmd *ACLLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// readReply parses ACL LOG: an array of maps, one per entry; the embedded
+// "client-info" line is parsed with parseClientInfo. Unknown keys are a
+// protocol error.
+func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]*ACLLogEntry, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i] = &ACLLogEntry{}
+ entry := cmd.val[i]
+ respLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for j := 0; j < respLen; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "count":
+ entry.Count, err = rd.ReadInt()
+ case "reason":
+ entry.Reason, err = rd.ReadString()
+ case "context":
+ entry.Context, err = rd.ReadString()
+ case "object":
+ entry.Object, err = rd.ReadString()
+ case "username":
+ entry.Username, err = rd.ReadString()
+ case "age-seconds":
+ entry.AgeSeconds, err = rd.ReadFloat()
+ case "client-info":
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+ if err != nil {
+ return err
+ }
+ case "entry-id":
+ entry.EntryID, err = rd.ReadInt()
+ case "timestamp-created":
+ entry.TimestampCreated, err = rd.ReadInt()
+ case "timestamp-last-updated":
+ entry.TimestampLastUpdated, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
 }
- cmd.locations = v.([]GeoLocation)
+
 return nil
 }
-//------------------------------------------------------------------------------
+// LibraryInfo holds the library info.
+type LibraryInfo struct {
+ LibName *string
+ LibVer *string
+}
-type GeoPos struct {
- Longitude, Latitude float64
+// WithLibraryName returns a valid LibraryInfo with library name only.
+func WithLibraryName(libName string) LibraryInfo {
+ return LibraryInfo{LibName: &libName}
 }
-type GeoPosCmd struct {
+// WithLibraryVersion returns a valid LibraryInfo with library version only.
+func WithLibraryVersion(libVer string) LibraryInfo {
+ return LibraryInfo{LibVer: &libVer}
+}
+
+//------------------------------------------------------------------------------
+
+type InfoCmd struct {
baseCmd
+ val map[string]map[string]string
+}
+
+var _ Cmder = (*InfoCmd)(nil)
+
+func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd {
+ return &InfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
- positions []*GeoPos
+func (cmd *InfoCmd) SetVal(val map[string]map[string]string) {
+ cmd.val = val
}
-var _ Cmder = (*GeoPosCmd)(nil)
+func (cmd *InfoCmd) Val() map[string]map[string]string {
+ return cmd.val
+}
-func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
- return &GeoPosCmd{
- baseCmd: baseCmd{_args: args},
+func (cmd *InfoCmd) Result() (map[string]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *InfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *InfoCmd) readReply(rd *proto.Reader) error {
+ val, err := rd.ReadString()
+ if err != nil {
+ return err
}
+
+ section := ""
+ scanner := bufio.NewScanner(strings.NewReader(val))
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") {
+ if cmd.val == nil {
+ cmd.val = make(map[string]map[string]string)
+ }
+ section = strings.TrimPrefix(line, "# ")
+ cmd.val[section] = make(map[string]string)
+ } else if line != "" {
+ if section == "Modules" {
+ moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`)
+ kv := moduleRe.FindStringSubmatch(line)
+ if len(kv) == 3 {
+ cmd.val[section][kv[1]] = kv[2]
+ }
+ } else {
+ kv := strings.SplitN(line, ":", 2)
+ if len(kv) == 2 {
+ cmd.val[section][kv[0]] = kv[1]
+ }
+ }
+ }
+ }
+
+ return nil
}
-func (cmd *GeoPosCmd) Val() []*GeoPos {
- return cmd.positions
+func (cmd *InfoCmd) Item(section, key string) string {
+ if cmd.val == nil {
+ return ""
+ } else if cmd.val[section] == nil {
+ return ""
+ } else {
+ return cmd.val[section][key]
+ }
}
-func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
- return cmd.Val(), cmd.Err()
+type MonitorStatus int
+
+const (
+ monitorStatusIdle MonitorStatus = iota
+ monitorStatusStart
+ monitorStatusStop
+)
+
+type MonitorCmd struct {
+ baseCmd
+ ch chan string
+ status MonitorStatus
+ mu sync.Mutex
+}
+
+func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd {
+ return &MonitorCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"monitor"},
+ },
+ ch: ch,
+ status: monitorStatusIdle,
+ mu: sync.Mutex{},
+ }
}
-func (cmd *GeoPosCmd) String() string {
- return cmdString(cmd, cmd.positions)
+func (cmd *MonitorCmd) String() string {
+ return cmdString(cmd, nil)
+}
+
+func (cmd *MonitorCmd) readReply(rd *proto.Reader) error {
+ ctx, cancel := context.WithCancel(cmd.ctx)
+ go func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := cmd.readMonitor(rd, cancel)
+ if err != nil {
+ cmd.err = err
+ return
+ }
+ }
+ }
+ }(ctx)
+ return nil
}
-func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error {
+ for {
+ cmd.mu.Lock()
+ st := cmd.status
+ pk, _ := rd.Peek(1)
+ cmd.mu.Unlock()
+ if len(pk) != 0 && st == monitorStatusStart {
+ cmd.mu.Lock()
+ line, err := rd.ReadString()
+ cmd.mu.Unlock()
+ if err != nil {
+ return err
+ }
+ cmd.ch <- line
+ }
+ if st == monitorStatusStop {
+ cancel()
+ break
+ }
}
- cmd.positions = v.([]*GeoPos)
return nil
}
-//------------------------------------------------------------------------------
+func (cmd *MonitorCmd) Start() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStart
+}
-type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
+func (cmd *MonitorCmd) Stop() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStop
}
-type CommandsInfoCmd struct {
+type VectorScoreSliceCmd struct {
baseCmd
- val map[string]*CommandInfo
+ val []VectorScore
}
-var _ Cmder = (*CommandsInfoCmd)(nil)
+var _ Cmder = (*VectorScoreSliceCmd)(nil)
-func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
- return &CommandsInfoCmd{
- baseCmd: baseCmd{_args: args},
+func NewVectorInfoSliceCmd(ctx context.Context, args ...any) *VectorScoreSliceCmd {
+ return &VectorScoreSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
-func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+func (cmd *VectorScoreSliceCmd) SetVal(val []VectorScore) {
+ cmd.val = val
+}
+
+func (cmd *VectorScoreSliceCmd) Val() []VectorScore {
return cmd.val
}
-func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
- return cmd.Val(), cmd.Err()
+func (cmd *VectorScoreSliceCmd) Result() ([]VectorScore, error) {
+ return cmd.val, cmd.err
}
-func (cmd *CommandsInfoCmd) String() string {
+func (cmd *VectorScoreSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error {
- var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser)
- if cmd.err != nil {
- return cmd.err
+func (cmd *VectorScoreSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]VectorScore, n)
+ for i := 0; i < n; i++ {
+ name, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Name = name
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Score = score
}
- cmd.val = v.(map[string]*CommandInfo)
return nil
}
diff --git a/command_recorder_test.go b/command_recorder_test.go
new file mode 100644
index 0000000000..2251df5ef6
--- /dev/null
+++ b/command_recorder_test.go
@@ -0,0 +1,86 @@
+package redis_test
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "github.com/redis/go-redis/v9"
+)
+
+// commandRecorder records the last N commands executed by a Redis client.
+type commandRecorder struct {
+ mu sync.Mutex
+ commands []string
+ maxSize int
+}
+
+// newCommandRecorder creates a new command recorder with the specified maximum size.
+func newCommandRecorder(maxSize int) *commandRecorder {
+ return &commandRecorder{
+ commands: make([]string, 0, maxSize),
+ maxSize: maxSize,
+ }
+}
+
+// Record adds a command to the recorder.
+func (r *commandRecorder) Record(cmd string) {
+ cmd = strings.ToLower(cmd)
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.commands = append(r.commands, cmd)
+ if len(r.commands) > r.maxSize {
+ r.commands = r.commands[1:]
+ }
+}
+
+// LastCommands returns a copy of the recorded commands.
+func (r *commandRecorder) LastCommands() []string {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return append([]string(nil), r.commands...)
+}
+
+// Contains checks if the recorder contains a specific command.
+func (r *commandRecorder) Contains(cmd string) bool {
+ cmd = strings.ToLower(cmd)
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ for _, c := range r.commands {
+ if strings.Contains(c, cmd) {
+ return true
+ }
+ }
+ return false
+}
+
+// Hook returns a Redis hook that records commands.
+func (r *commandRecorder) Hook() redis.Hook {
+ return &commandHook{recorder: r}
+}
+
+// commandHook implements the redis.Hook interface to record commands.
+type commandHook struct {
+ recorder *commandRecorder
+}
+
+func (h *commandHook) DialHook(next redis.DialHook) redis.DialHook {
+ return next
+}
+
+func (h *commandHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ h.recorder.Record(cmd.String())
+ return next(ctx, cmd)
+ }
+}
+
+func (h *commandHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ for _, cmd := range cmds {
+ h.recorder.Record(cmd.String())
+ }
+ return next(ctx, cmds)
+ }
+}
diff --git a/command_test.go b/command_test.go
index e42375eda9..b9d558cf9d 100644
--- a/command_test.go
+++ b/command_test.go
@@ -1,10 +1,13 @@
package redis_test
import (
- "github.com/go-redis/redis"
+ "errors"
+ "time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ "github.com/redis/go-redis/v9"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
)
var _ = Describe("Cmd", func() {
@@ -12,7 +15,7 @@ var _ = Describe("Cmd", func() {
BeforeEach(func() {
client = redis.NewClient(redisOptions())
- Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
})
AfterEach(func() {
@@ -20,19 +23,19 @@ var _ = Describe("Cmd", func() {
})
It("implements Stringer", func() {
- set := client.Set("foo", "bar", 0)
+ set := client.Set(ctx, "foo", "bar", 0)
Expect(set.String()).To(Equal("set foo bar: OK"))
- get := client.Get("foo")
+ get := client.Get(ctx, "foo")
Expect(get.String()).To(Equal("get foo: bar"))
})
It("has val/err", func() {
- set := client.Set("key", "hello", 0)
+ set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
- get := client.Get("key")
+ get := client.Get(ctx, "key")
Expect(get.Err()).NotTo(HaveOccurred())
Expect(get.Val()).To(Equal("hello"))
@@ -41,20 +44,53 @@ var _ = Describe("Cmd", func() {
})
It("has helpers", func() {
- set := client.Set("key", "10", 0)
+ set := client.Set(ctx, "key", "10", 0)
Expect(set.Err()).NotTo(HaveOccurred())
- n, err := client.Get("key").Int64()
+ n, err := client.Get(ctx, "key").Int64()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(10)))
- un, err := client.Get("key").Uint64()
+ un, err := client.Get(ctx, "key").Uint64()
Expect(err).NotTo(HaveOccurred())
Expect(un).To(Equal(uint64(10)))
- f, err := client.Get("key").Float64()
+ f, err := client.Get(ctx, "key").Float64()
Expect(err).NotTo(HaveOccurred())
Expect(f).To(Equal(float64(10)))
})
+ It("supports float32", func() {
+ f := float32(66.97)
+
+ err := client.Set(ctx, "float_key", f, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "float_key").Float32()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(f))
+ })
+
+ It("supports time.Time", func() {
+ tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
+
+ err := client.Set(ctx, "time_key", tm, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ s, err := client.Get(ctx, "time_key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(Equal("2019-01-01T09:45:10.000222125Z"))
+
+ tm2, err := client.Get(ctx, "time_key").Time()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(tm2).To(BeTemporally("==", tm))
+ })
+
+ It("allows to set custom error", func() {
+ e := errors.New("custom error")
+ cmd := redis.Cmd{}
+ cmd.SetErr(e)
+ _, err := cmd.Result()
+ Expect(err).To(Equal(e))
+ })
})
diff --git a/commands.go b/commands.go
index aa98aa736c..04235a2e6d 100644
--- a/commands.go
+++ b/commands.go
@@ -1,2146 +1,752 @@
package redis
import (
+ "context"
+ "encoding"
+ "errors"
+ "fmt"
"io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
"time"
- "github.com/go-redis/redis/internal"
+ "github.com/redis/go-redis/v9/internal"
)
-func readTimeout(timeout time.Duration) time.Duration {
- if timeout == 0 {
- return 0
- }
- return timeout + 10*time.Second
-}
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
func usePrecise(dur time.Duration) bool {
return dur < time.Second || dur%time.Second != 0
}
-func formatMs(dur time.Duration) int64 {
+func formatMs(ctx context.Context, dur time.Duration) int64 {
if dur > 0 && dur < time.Millisecond {
- internal.Logf(
- "specified duration is %s, but minimal supported value is %s",
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
dur, time.Millisecond,
)
+ return 1
}
return int64(dur / time.Millisecond)
}
-func formatSec(dur time.Duration) int64 {
+func formatSec(ctx context.Context, dur time.Duration) int64 {
if dur > 0 && dur < time.Second {
- internal.Logf(
- "specified duration is %s, but minimal supported value is %s",
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
dur, time.Second,
)
+ return 1
}
return int64(dur / time.Second)
}
-type Cmdable interface {
- Pipeline() Pipeliner
- Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
-
- TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
- TxPipeline() Pipeliner
-
- ClientGetName() *StringCmd
- Echo(message interface{}) *StringCmd
- Ping() *StatusCmd
- Quit() *StatusCmd
- Del(keys ...string) *IntCmd
- Unlink(keys ...string) *IntCmd
- Dump(key string) *StringCmd
- Exists(keys ...string) *IntCmd
- Expire(key string, expiration time.Duration) *BoolCmd
- ExpireAt(key string, tm time.Time) *BoolCmd
- Keys(pattern string) *StringSliceCmd
- Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd
- Move(key string, db int64) *BoolCmd
- ObjectRefCount(key string) *IntCmd
- ObjectEncoding(key string) *StringCmd
- ObjectIdleTime(key string) *DurationCmd
- Persist(key string) *BoolCmd
- PExpire(key string, expiration time.Duration) *BoolCmd
- PExpireAt(key string, tm time.Time) *BoolCmd
- PTTL(key string) *DurationCmd
- RandomKey() *StringCmd
- Rename(key, newkey string) *StatusCmd
- RenameNX(key, newkey string) *BoolCmd
- Restore(key string, ttl time.Duration, value string) *StatusCmd
- RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
- Sort(key string, sort Sort) *StringSliceCmd
- SortInterfaces(key string, sort Sort) *SliceCmd
- TTL(key string) *DurationCmd
- Type(key string) *StatusCmd
- Scan(cursor uint64, match string, count int64) *ScanCmd
- SScan(key string, cursor uint64, match string, count int64) *ScanCmd
- HScan(key string, cursor uint64, match string, count int64) *ScanCmd
- ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
- Append(key, value string) *IntCmd
- BitCount(key string, bitCount *BitCount) *IntCmd
- BitOpAnd(destKey string, keys ...string) *IntCmd
- BitOpOr(destKey string, keys ...string) *IntCmd
- BitOpXor(destKey string, keys ...string) *IntCmd
- BitOpNot(destKey string, key string) *IntCmd
- BitPos(key string, bit int64, pos ...int64) *IntCmd
- Decr(key string) *IntCmd
- DecrBy(key string, decrement int64) *IntCmd
- Get(key string) *StringCmd
- GetBit(key string, offset int64) *IntCmd
- GetRange(key string, start, end int64) *StringCmd
- GetSet(key string, value interface{}) *StringCmd
- Incr(key string) *IntCmd
- IncrBy(key string, value int64) *IntCmd
- IncrByFloat(key string, value float64) *FloatCmd
- MGet(keys ...string) *SliceCmd
- MSet(pairs ...interface{}) *StatusCmd
- MSetNX(pairs ...interface{}) *BoolCmd
- Set(key string, value interface{}, expiration time.Duration) *StatusCmd
- SetBit(key string, offset int64, value int) *IntCmd
- SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
- SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
- SetRange(key string, offset int64, value string) *IntCmd
- StrLen(key string) *IntCmd
- HDel(key string, fields ...string) *IntCmd
- HExists(key, field string) *BoolCmd
- HGet(key, field string) *StringCmd
- HGetAll(key string) *StringStringMapCmd
- HIncrBy(key, field string, incr int64) *IntCmd
- HIncrByFloat(key, field string, incr float64) *FloatCmd
- HKeys(key string) *StringSliceCmd
- HLen(key string) *IntCmd
- HMGet(key string, fields ...string) *SliceCmd
- HMSet(key string, fields map[string]interface{}) *StatusCmd
- HSet(key, field string, value interface{}) *BoolCmd
- HSetNX(key, field string, value interface{}) *BoolCmd
- HVals(key string) *StringSliceCmd
- BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
- BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
- BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
- LIndex(key string, index int64) *StringCmd
- LInsert(key, op string, pivot, value interface{}) *IntCmd
- LInsertBefore(key string, pivot, value interface{}) *IntCmd
- LInsertAfter(key string, pivot, value interface{}) *IntCmd
- LLen(key string) *IntCmd
- LPop(key string) *StringCmd
- LPush(key string, values ...interface{}) *IntCmd
- LPushX(key string, value interface{}) *IntCmd
- LRange(key string, start, stop int64) *StringSliceCmd
- LRem(key string, count int64, value interface{}) *IntCmd
- LSet(key string, index int64, value interface{}) *StatusCmd
- LTrim(key string, start, stop int64) *StatusCmd
- RPop(key string) *StringCmd
- RPopLPush(source, destination string) *StringCmd
- RPush(key string, values ...interface{}) *IntCmd
- RPushX(key string, value interface{}) *IntCmd
- SAdd(key string, members ...interface{}) *IntCmd
- SCard(key string) *IntCmd
- SDiff(keys ...string) *StringSliceCmd
- SDiffStore(destination string, keys ...string) *IntCmd
- SInter(keys ...string) *StringSliceCmd
- SInterStore(destination string, keys ...string) *IntCmd
- SIsMember(key string, member interface{}) *BoolCmd
- SMembers(key string) *StringSliceCmd
- SMembersMap(key string) *StringStructMapCmd
- SMove(source, destination string, member interface{}) *BoolCmd
- SPop(key string) *StringCmd
- SPopN(key string, count int64) *StringSliceCmd
- SRandMember(key string) *StringCmd
- SRandMemberN(key string, count int64) *StringSliceCmd
- SRem(key string, members ...interface{}) *IntCmd
- SUnion(keys ...string) *StringSliceCmd
- SUnionStore(destination string, keys ...string) *IntCmd
- ZAdd(key string, members ...Z) *IntCmd
- ZAddNX(key string, members ...Z) *IntCmd
- ZAddXX(key string, members ...Z) *IntCmd
- ZAddCh(key string, members ...Z) *IntCmd
- ZAddNXCh(key string, members ...Z) *IntCmd
- ZAddXXCh(key string, members ...Z) *IntCmd
- ZIncr(key string, member Z) *FloatCmd
- ZIncrNX(key string, member Z) *FloatCmd
- ZIncrXX(key string, member Z) *FloatCmd
- ZCard(key string) *IntCmd
- ZCount(key, min, max string) *IntCmd
- ZLexCount(key, min, max string) *IntCmd
- ZIncrBy(key string, increment float64, member string) *FloatCmd
- ZInterStore(destination string, store ZStore, keys ...string) *IntCmd
- ZRange(key string, start, stop int64) *StringSliceCmd
- ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
- ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
- ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
- ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
- ZRank(key, member string) *IntCmd
- ZRem(key string, members ...interface{}) *IntCmd
- ZRemRangeByRank(key string, start, stop int64) *IntCmd
- ZRemRangeByScore(key, min, max string) *IntCmd
- ZRemRangeByLex(key, min, max string) *IntCmd
- ZRevRange(key string, start, stop int64) *StringSliceCmd
- ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
- ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
- ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
- ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
- ZRevRank(key, member string) *IntCmd
- ZScore(key, member string) *FloatCmd
- ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd
- PFAdd(key string, els ...interface{}) *IntCmd
- PFCount(keys ...string) *IntCmd
- PFMerge(dest string, keys ...string) *StatusCmd
- BgRewriteAOF() *StatusCmd
- BgSave() *StatusCmd
- ClientKill(ipPort string) *StatusCmd
- ClientList() *StringCmd
- ClientPause(dur time.Duration) *BoolCmd
- ConfigGet(parameter string) *SliceCmd
- ConfigResetStat() *StatusCmd
- ConfigSet(parameter, value string) *StatusCmd
- DBSize() *IntCmd
- FlushAll() *StatusCmd
- FlushAllAsync() *StatusCmd
- FlushDB() *StatusCmd
- FlushDBAsync() *StatusCmd
- Info(section ...string) *StringCmd
- LastSave() *IntCmd
- Save() *StatusCmd
- Shutdown() *StatusCmd
- ShutdownSave() *StatusCmd
- ShutdownNoSave() *StatusCmd
- SlaveOf(host, port string) *StatusCmd
- Time() *TimeCmd
- Eval(script string, keys []string, args ...interface{}) *Cmd
- EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(scripts ...string) *BoolSliceCmd
- ScriptFlush() *StatusCmd
- ScriptKill() *StatusCmd
- ScriptLoad(script string) *StringCmd
- DebugObject(key string) *StringCmd
- Publish(channel string, message interface{}) *IntCmd
- PubSubChannels(pattern string) *StringSliceCmd
- PubSubNumSub(channels ...string) *StringIntMapCmd
- PubSubNumPat() *IntCmd
- ClusterSlots() *ClusterSlotsCmd
- ClusterNodes() *StringCmd
- ClusterMeet(host, port string) *StatusCmd
- ClusterForget(nodeID string) *StatusCmd
- ClusterReplicate(nodeID string) *StatusCmd
- ClusterResetSoft() *StatusCmd
- ClusterResetHard() *StatusCmd
- ClusterInfo() *StringCmd
- ClusterKeySlot(key string) *IntCmd
- ClusterCountFailureReports(nodeID string) *IntCmd
- ClusterCountKeysInSlot(slot int) *IntCmd
- ClusterDelSlots(slots ...int) *StatusCmd
- ClusterDelSlotsRange(min, max int) *StatusCmd
- ClusterSaveConfig() *StatusCmd
- ClusterSlaves(nodeID string) *StringSliceCmd
- ClusterFailover() *StatusCmd
- ClusterAddSlots(slots ...int) *StatusCmd
- ClusterAddSlotsRange(min, max int) *StatusCmd
- GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
- GeoPos(key string, members ...string) *GeoPosCmd
- GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoDist(key string, member1, member2, unit string) *FloatCmd
- GeoHash(key string, members ...string) *StringSliceCmd
- Command() *CommandsInfoCmd
-}
-
-type StatefulCmdable interface {
- Cmdable
- Auth(password string) *StatusCmd
- Select(index int) *StatusCmd
- ClientSetName(name string) *BoolCmd
- ReadOnly() *StatusCmd
- ReadWrite() *StatusCmd
-}
-
-var _ Cmdable = (*Client)(nil)
-var _ Cmdable = (*Tx)(nil)
-var _ Cmdable = (*Ring)(nil)
-var _ Cmdable = (*ClusterClient)(nil)
-
-type cmdable struct {
- process func(cmd Cmder) error
-}
-
-func (c *cmdable) setProcessor(fn func(Cmder) error) {
- c.process = fn
-}
-
-type statefulCmdable struct {
- cmdable
- process func(cmd Cmder) error
-}
-
-func (c *statefulCmdable) setProcessor(fn func(Cmder) error) {
- c.process = fn
- c.cmdable.setProcessor(fn)
-}
-
-//------------------------------------------------------------------------------
-
-func (c *statefulCmdable) Auth(password string) *StatusCmd {
- cmd := NewStatusCmd("auth", password)
- c.process(cmd)
- return cmd
-}
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
-func (c *cmdable) Echo(message interface{}) *StringCmd {
- cmd := NewStringCmd("echo", message)
- c.process(cmd)
- return cmd
+ dst = append(dst, src...)
+ return dst
}
-func (c *cmdable) Ping() *StatusCmd {
- cmd := NewStatusCmd("ping")
- c.process(cmd)
- return cmd
-}
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+ return append(dst, arg)
+ case nil:
+ return dst
+ default:
+ // scan struct field
+ v := reflect.ValueOf(arg)
+ if v.Type().Kind() == reflect.Ptr {
+ if v.IsNil() {
+ // error: arg is not a valid object
+ return dst
+ }
+ v = v.Elem()
+ }
-func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
- cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
- c.process(cmd)
- return cmd
-}
+ if v.Type().Kind() == reflect.Struct {
+ return appendStructField(dst, v)
+ }
-func (c *cmdable) Quit() *StatusCmd {
- panic("not implemented")
+ return append(dst, arg)
+ }
}
-func (c *statefulCmdable) Select(index int) *StatusCmd {
- cmd := NewStatusCmd("select", index)
- c.process(cmd)
- return cmd
-}
+// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst.
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ tag := typ.Field(i).Tag.Get("redis")
+ if tag == "" || tag == "-" {
+ continue
+ }
+ name, opt, _ := strings.Cut(tag, ",")
+ if name == "" {
+ continue
+ }
-//------------------------------------------------------------------------------
+ field := v.Field(i)
-func (c *cmdable) Del(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "del"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(args...)
- c.process(cmd)
- return cmd
-}
+		// skip the field when it is tagged omitempty and holds an empty value
+ if omitEmpty(opt) && isEmptyValue(field) {
+ continue
+ }
-func (c *cmdable) Unlink(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unlink"
- for i, key := range keys {
- args[1+i] = key
+ if field.CanInterface() {
+ dst = append(dst, name, field.Interface())
+ }
}
- cmd := NewIntCmd(args...)
- c.process(cmd)
- return cmd
-}
-func (c *cmdable) Dump(key string) *StringCmd {
- cmd := NewStringCmd("dump", key)
- c.process(cmd)
- return cmd
+ return dst
}
-func (c *cmdable) Exists(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "exists"
- for i, key := range keys {
- args[1+i] = key
+func omitEmpty(opt string) bool {
+ for opt != "" {
+ var name string
+ name, opt, _ = strings.Cut(opt, ",")
+ if name == "omitempty" {
+ return true
+ }
}
- cmd := NewIntCmd(args...)
- c.process(cmd)
- return cmd
-}
-
-func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd("expire", key, formatSec(expiration))
- c.process(cmd)
- return cmd
-}
-
-func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd("expireat", key, tm.Unix())
- c.process(cmd)
- return cmd
-}
-
-func (c *cmdable) Keys(pattern string) *StringSliceCmd {
- cmd := NewStringSliceCmd("keys", pattern)
- c.process(cmd)
- return cmd
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Pointer:
+ return v.IsNil()
+ case reflect.Struct:
+ if v.Type() == reflect.TypeOf(time.Time{}) {
+ return v.IsZero()
+ }
+ // Only supports the struct time.Time,
+ // subsequent iterations will follow the func Scan support decoder.
+ }
+ return false
}
-func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd {
- cmd := NewStatusCmd(
- "migrate",
- host,
- port,
- key,
- db,
- formatMs(timeout),
- )
- cmd.setReadTimeout(readTimeout(timeout))
- c.process(cmd)
- return cmd
-}
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
-func (c *cmdable) Move(key string, db int64) *BoolCmd {
- cmd := NewBoolCmd("move", key, db)
- c.process(cmd)
- return cmd
-}
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
-func (c *cmdable) ObjectRefCount(key string) *IntCmd {
- cmd := NewIntCmd("object", "refcount", key)
- c.process(cmd)
- return cmd
+ Command(ctx context.Context) *CommandsInfoCmd
+ CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+ CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+ CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientInfo(ctx context.Context) *ClientInfoCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientUnpause(ctx context.Context) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ClientUnblock(ctx context.Context, id int64) *IntCmd
+ ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+ ClientMaintNotifications(ctx context.Context, enabled bool, endpointType string) *StatusCmd
+ ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
+
+ ACLCmdable
+ BitMapCmdable
+ ClusterCmdable
+ GenericCmdable
+ GeoCmdable
+ HashCmdable
+ HyperLogLogCmdable
+ ListCmdable
+ ProbabilisticCmdable
+ PubSubCmdable
+ ScriptingFunctionsCmdable
+ SearchCmdable
+ SetCmdable
+ SortedSetCmdable
+ StringCmdable
+ StreamCmdable
+ TimeseriesCmdable
+ JSONCmdable
+ VectorSetCmdable
}
-func (c *cmdable) ObjectEncoding(key string) *StringCmd {
- cmd := NewStringCmd("object", "encoding", key)
- c.process(cmd)
- return cmd
-}
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+ ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd
+ Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+ _ Cmdable = (*Pipeline)(nil)
+)
-func (c *cmdable) ObjectIdleTime(key string) *DurationCmd {
- cmd := NewDurationCmd(time.Second, "object", "idletime", key)
- c.process(cmd)
- return cmd
-}
+type cmdable func(ctx context.Context, cmd Cmder) error
-func (c *cmdable) Persist(key string) *BoolCmd {
- cmd := NewBoolCmd("persist", key)
- c.process(cmd)
- return cmd
-}
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
-func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
- c.process(cmd)
- return cmd
-}
+//------------------------------------------------------------------------------
-func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(
- "pexpireat",
- key,
- tm.UnixNano()/int64(time.Millisecond),
- )
- c.process(cmd)
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) PTTL(key string) *DurationCmd {
- cmd := NewDurationCmd(time.Millisecond, "pttl", key)
- c.process(cmd)
+// AuthACL performs an AUTH command, using the given user and pass.
+// Should be used to authenticate the current connection with one of the connections defined in the ACL list
+// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) RandomKey() *StringCmd {
- cmd := NewStringCmd("randomkey")
- c.process(cmd)
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) Rename(key, newkey string) *StatusCmd {
- cmd := NewStatusCmd("rename", key, newkey)
- c.process(cmd)
+func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) RenameNX(key, newkey string) *BoolCmd {
- cmd := NewBoolCmd("renamenx", key, newkey)
- c.process(cmd)
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- "restore",
- key,
- formatMs(ttl),
- value,
- )
- c.process(cmd)
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- "restore",
- key,
- formatMs(ttl),
- value,
- "replace",
- )
- c.process(cmd)
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
return cmd
}
-type Sort struct {
- By string
- Offset, Count float64
- Get []string
- Order string
- IsAlpha bool
- Store string
-}
-
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
- if sort.By != "" {
- args = append(args, "by", sort.By)
- }
- if sort.Offset != 0 || sort.Count != 0 {
- args = append(args, "limit", sort.Offset, sort.Count)
- }
- for _, get := range sort.Get {
- args = append(args, "get", get)
- }
- if sort.Order != "" {
- args = append(args, sort.Order)
+// ClientSetInfo sends a CLIENT SETINFO command with the provided info.
+func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd {
+ err := info.Validate()
+ if err != nil {
+ panic(err.Error())
}
- if sort.IsAlpha {
- args = append(args, "alpha")
- }
- if sort.Store != "" {
- args = append(args, "store", sort.Store)
- }
- return args
-}
-
-func (c *cmdable) Sort(key string, sort Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(sort.args(key)...)
- c.process(cmd)
- return cmd
-}
-
-func (c *cmdable) SortInterfaces(key string, sort Sort) *SliceCmd {
- cmd := NewSliceCmd(sort.args(key)...)
- c.process(cmd)
- return cmd
-}
-
-func (c *cmdable) TTL(key string) *DurationCmd {
- cmd := NewDurationCmd(time.Second, "ttl", key)
- c.process(cmd)
- return cmd
-}
-
-func (c *cmdable) Type(key string) *StatusCmd {
- cmd := NewStatusCmd("type", key)
- c.process(cmd)
- return cmd
-}
-func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
+ var cmd *StatusCmd
+ if info.LibName != nil {
+ libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version()))
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName)
+ } else {
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer)
}
- cmd := NewScanCmd(c.process, args...)
- c.process(cmd)
- return cmd
-}
-func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"sscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(c.process, args...)
- c.process(cmd)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"hscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
+// Validate checks that exactly one of LibName and LibVer is set.
+func (info LibraryInfo) Validate() error {
+ if info.LibName != nil && info.LibVer != nil {
+ return errors.New("both LibName and LibVer cannot be set at the same time")
}
- if count > 0 {
- args = append(args, "count", count)
+ if info.LibName == nil && info.LibVer == nil {
+ return errors.New("at least one of LibName and LibVer should be set")
}
- cmd := NewScanCmd(c.process, args...)
- c.process(cmd)
- return cmd
+ return nil
}
-func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"zscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
+// Hello sets the RESP protocol version used for the connection.
+func (c statefulCmdable) Hello(ctx context.Context,
+ ver int, username, password, clientName string,
+) *MapStringInterfaceCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "hello", ver)
+ if password != "" {
+ if username != "" {
+ args = append(args, "auth", username, password)
+ } else {
+ args = append(args, "auth", "default", password)
+ }
}
- if count > 0 {
- args = append(args, "count", count)
+ if clientName != "" {
+ args = append(args, "setname", clientName)
}
- cmd := NewScanCmd(c.process, args...)
- c.process(cmd)
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
return cmd
}
//------------------------------------------------------------------------------
-func (c *cmdable) Append(key, value string) *IntCmd {
- cmd := NewIntCmd("append", key, value)
- c.process(cmd)
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
return cmd
}
-type BitCount struct {
- Start, End int64
+// FilterBy is used for the `CommandList` command parameter.
+type FilterBy struct {
+ Module string
+ ACLCat string
+ Pattern string
}
-func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
- args := []interface{}{"bitcount", key}
- if bitCount != nil {
- args = append(
- args,
- bitCount.Start,
- bitCount.End,
- )
+func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+ args := make([]interface{}, 0, 5)
+ args = append(args, "command", "list")
+ if filter != nil {
+ if filter.Module != "" {
+ args = append(args, "filterby", "module", filter.Module)
+ } else if filter.ACLCat != "" {
+ args = append(args, "filterby", "aclcat", filter.ACLCat)
+ } else if filter.Pattern != "" {
+ args = append(args, "filterby", "pattern", filter.Pattern)
+ }
}
- cmd := NewIntCmd(args...)
- c.process(cmd)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "bitop"
- args[1] = op
- args[2] = destKey
- for i, key := range keys {
- args[3+i] = key
- }
- cmd := NewIntCmd(args...)
- c.process(cmd)
+func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeys"
+ copy(args[2:], commands)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
- return c.bitOp("and", destKey, keys...)
-}
-
-func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
- return c.bitOp("or", destKey, keys...)
-}
-
-func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
- return c.bitOp("xor", destKey, keys...)
-}
-
-func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd {
- return c.bitOp("not", destKey, key)
-}
-
-func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
- args := make([]interface{}, 3+len(pos))
- args[0] = "bitpos"
- args[1] = key
- args[2] = bit
- switch len(pos) {
- case 0:
- case 1:
- args[3] = pos[0]
- case 2:
- args[3] = pos[0]
- args[4] = pos[1]
- default:
- panic("too many arguments")
- }
- cmd := NewIntCmd(args...)
- c.process(cmd)
+func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeysandflags"
+ copy(args[2:], commands)
+ cmd := NewKeyFlagsCmd(ctx, args...)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) Decr(key string) *IntCmd {
- cmd := NewIntCmd("decr", key)
- c.process(cmd)
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd {
- cmd := NewIntCmd("decrby", key, decrement)
- c.process(cmd)
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) Get(key string) *StringCmd {
- cmd := NewStringCmd("get", key)
- c.process(cmd)
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) GetBit(key string, offset int64) *IntCmd {
- cmd := NewIntCmd("getbit", key, offset)
- c.process(cmd)
+func (c cmdable) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) GetRange(key string, start, end int64) *StringCmd {
- cmd := NewStringCmd("getrange", key, start, end)
- c.process(cmd)
- return cmd
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+ panic("not implemented")
}
-func (c *cmdable) GetSet(key string, value interface{}) *StringCmd {
- cmd := NewStringCmd("getset", key, value)
- c.process(cmd)
- return cmd
-}
+//------------------------------------------------------------------------------
-func (c *cmdable) Incr(key string) *IntCmd {
- cmd := NewIntCmd("incr", key)
- c.process(cmd)
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) IncrBy(key string, value int64) *IntCmd {
- cmd := NewIntCmd("incrby", key, value)
- c.process(cmd)
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd {
- cmd := NewFloatCmd("incrbyfloat", key, value)
- c.process(cmd)
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
return cmd
}
-func (c *cmdable) MGet(keys ...string) *SliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "mget"
+// ClientKillByFilter uses the new-style filter syntax, while ClientKill uses the old one.
+//
+// CLIENT KILL