diff --git a/.gitignore b/.gitignore index 17952c7fd0..4610ac14e8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .idea tmp +.DS_Store diff --git a/.spellcheck.yml b/.spellcheck.yml index c411a380e8..b63649b810 100644 --- a/.spellcheck.yml +++ b/.spellcheck.yml @@ -1,9 +1,9 @@ files: - '**/*.md' - - '!docs/clients/index.md' - - '!docs/libraries/index.md' - - '!docs/modules/index.md' - - '!docs/tools/index.md' + - '!resources/clients/index.md' + - '!resources/libraries/index.md' + - '!resources/modules/index.md' + - '!resources/tools/index.md' - '!docs/reference/modules/modules-api-ref.md' dictionaries: - wordlist diff --git a/README.md b/README.md index 0913ee1140..1a93bea2d1 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,12 @@ # Redis documentation +> **Important**: This repository got replaced by the new [Redis docs](https://github.com/redis/docs) repository and will be archived soon. + + +## License vs Trademarks + +OPEN SOURCE LICENSE VS. TRADEMARKS. The three-clause BSD license gives you the right to redistribute and use the software in source and binary forms, with or without modification, under certain conditions. However, open source licenses like the three-clause BSD license do not address trademarks. For further details please read the [Redis Trademark Policy](https://www.redis.com/legal/trademark-policy)." + ## Clients All clients are listed under language specific sub-folders of [clients](./clients) @@ -41,16 +48,49 @@ into account: These keywords will get expanded and auto-linked to relevant parts of the documentation. -There should be at least two predefined sections: description and return value. -The return value section is marked using the @return keyword: +Each command will have a description and both RESP2 and RESP3 return values. +Regarding the return values, these are contained in the files: -``` -Returns all keys matching the given pattern. 
+* `resp2_replies.json` +* `resp3_replies.json` -@return +Each file is a dictionary with a matching set of keys. Each key is an array of strings that, +when processed, produce Markdown content. Here's an example: -@multi-bulk-reply: all the keys that matched the pattern. ``` +{ + ... + "ACL CAT": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements representing ACL categories or commands in a given category.", + "* [Simple error reply](/docs/reference/protocol-spec#simple-errors): the command returns an error if an invalid category name is given." + ], + ... +} +``` + +**Important**: when adding or editing return values, be sure to edit both files. Use the following +links for the reply type. Note: do not use `@reply-type` specifiers; use only the Markdown link. + +```md +@simple-string-reply: [Simple string reply](https://redis.io/docs/reference/protocol-spec#simple-strings) +@simple-error-reply: [Simple error reply](https://redis.io/docs/reference/protocol-spec#simple-errors) +@integer-reply: [Integer reply](https://redis.io/docs/reference/protocol-spec#integers) +@bulk-string-reply: [Bulk string reply](https://redis.io/docs/reference/protocol-spec#bulk-strings) +@array-reply: [Array reply](https://redis.io/docs/reference/protocol-spec#arrays) +@nil-reply: [Nil reply](https://redis.io/docs/reference/protocol-spec#bulk-strings) +@null-reply: [Null reply](https://redis.io/docs/reference/protocol-spec#nulls) +@boolean-reply: [Boolean reply](https://redis.io/docs/reference/protocol-spec#booleans) +@double-reply: [Double reply](https://redis.io/docs/reference/protocol-spec#doubles) +@big-number-reply: [Big number reply](https://redis.io/docs/reference/protocol-spec#big-numbers) +@bulk-error-reply: [Bulk error reply](https://redis.io/docs/reference/protocol-spec#bulk-errors) +@verbatim-string-reply: [Verbatim string 
reply](https://redis.io/docs/reference/protocol-spec#verbatim-strings) +@map-reply: [Map reply](https://redis.io/docs/reference/protocol-spec#maps) +@set-reply: [Set reply](https://redis.io/docs/reference/protocol-spec#sets) +@push-reply: [Push reply](https://redis.io/docs/reference/protocol-spec#pushes) +``` + +**Note:** RESP3 return schemas are not currently included in the `resp2/resp3_replies.json` files for Redis Stack modules. ## Styling guidelines diff --git a/clients/ballerina/github.com/ballerina-platform/module-ballerinax-redis.json b/clients/ballerina/github.com/ballerina-platform/module-ballerinax-redis.json new file mode 100644 index 0000000000..49340f841d --- /dev/null +++ b/clients/ballerina/github.com/ballerina-platform/module-ballerinax-redis.json @@ -0,0 +1,7 @@ +{ + "name": "Ballerina Redis Client", + "description": "Official Redis client for Ballerina language with the support for Redis clusters, connection pooling and secure connections.", + "homepage": "https://central.ballerina.io/ballerinax/redis/latest", + "repository": "https://github.com/ballerina-platform/module-ballerinax-redis", + "recommended": true +} diff --git a/clients/bash/github.com/caquino/redis-bash.json b/clients/bash/github.com/caquino/redis-bash.json new file mode 100644 index 0000000000..319f6fed0c --- /dev/null +++ b/clients/bash/github.com/caquino/redis-bash.json @@ -0,0 +1,7 @@ +{ + "name": "redis-bash", + "description": "Bash library and example client to access Redis Databases", + "twitter": [ + "syshero" + ] +} diff --git a/clients/cpp/github.com/wusongwei/soce/tree/master/soce-redis.json b/clients/cpp/github.com/wusongwei/soce.json similarity index 61% rename from clients/cpp/github.com/wusongwei/soce/tree/master/soce-redis.json rename to clients/cpp/github.com/wusongwei/soce.json index b230eb134e..26f6972f1f 
100644 --- a/clients/cpp/github.com/wusongwei/soce/tree/master/soce-redis.json +++ b/clients/cpp/github.com/wusongwei/soce.json @@ -1,5 +1,4 @@ { "name": "soce-redis", - "description": "Based on hiredis, accesses the sever(single, sentinel, cluster) with the same interface, supports pipeline and async(by coroutine)", - "homepage": "https://github.com/wusongwei/soce/tree/master/soce-redis" + "description": "Based on hiredis, accesses the sever(single, sentinel, cluster) with the same interface, supports pipeline and async(by coroutine)" } \ No newline at end of file diff --git a/clients/csharp/github.com/redis/NRedisStack.json b/clients/csharp/github.com/redis/NRedisStack.json new file mode 100644 index 0000000000..b02b4895c6 --- /dev/null +++ b/clients/csharp/github.com/redis/NRedisStack.json @@ -0,0 +1,5 @@ +{ + "name": "NRedisStack", + "description": "This client is developed by Redis to bring RedisStack support to CSharp.", + "official": true +} diff --git a/clients/csharp/github.com/zhuovi/XiaoFeng.Redis.json b/clients/csharp/github.com/zhuovi/XiaoFeng.Redis.json new file mode 100644 index 0000000000..8f73792fef --- /dev/null +++ b/clients/csharp/github.com/zhuovi/XiaoFeng.Redis.json @@ -0,0 +1,4 @@ +{ + "name": "XiaoFeng.Redis", + "description": "A useful Redis client that supports the .NET FRAMEWORK,.NET CORE,.NET STANDARD. A client tool that is quite convenient to operate." 
+} \ No newline at end of file diff --git a/clients/deno/github.com/denodrivers/redis.json b/clients/deno/github.com/denodrivers/redis.json new file mode 100644 index 0000000000..36ee3007f2 --- /dev/null +++ b/clients/deno/github.com/denodrivers/redis.json @@ -0,0 +1,4 @@ +{ + "name": "redis", + "description": "🦕 Redis client for Deno 🍕" +} diff --git a/clients/deno/github.com/iuioiua/r2d2.json b/clients/deno/github.com/iuioiua/r2d2.json new file mode 100644 index 0000000000..636cd2f05e --- /dev/null +++ b/clients/deno/github.com/iuioiua/r2d2.json @@ -0,0 +1,4 @@ +{ + "name": "r2d2", + "description": "Fast, lightweight Redis client library for Deno." +} \ No newline at end of file diff --git a/clients/fancy/github.com/bakkdoor/redis.fy.json b/clients/fancy/github.com/bakkdoor/redis.fy.json deleted file mode 100644 index 99a5c75559..0000000000 --- a/clients/fancy/github.com/bakkdoor/redis.fy.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "redis.fy", - "description": "A Fancy Redis client library", - "twitter": [ - "bakkdoor" - ] -} \ No newline at end of file diff --git a/clients/gleam/github.com/massivefermion/radish.json b/clients/gleam/github.com/massivefermion/radish.json new file mode 100644 index 0000000000..519ed4bf82 --- /dev/null +++ b/clients/gleam/github.com/massivefermion/radish.json @@ -0,0 +1,8 @@ +{ + "name": "Radish", + "description": "Simple and Fast Redis client written in and for Gleam", + "homepage": "https://hexdocs.pm/radish", + "twitter": [ + "massivefermion" + ] +} \ No newline at end of file diff --git a/clients/go/github.com/go-redis/redis.json b/clients/go/github.com/redis/go-redis.json similarity index 66% rename from clients/go/github.com/go-redis/redis.json rename to clients/go/github.com/redis/go-redis.json index 
c91aa86578..06de74d8c1 100644 --- a/clients/go/github.com/go-redis/redis.json +++ b/clients/go/github.com/redis/go-redis.json @@ -1,5 +1,5 @@ { - "name": "go-redis/redis", + "name": "go-redis", "description": "Redis client for Golang supporting Redis Sentinel and Redis Cluster out of the box.", - "recommended": true -} \ No newline at end of file + "official": true +} diff --git a/clients/java/github.com/mrniko/redisson.json b/clients/java/github.com/mrniko/redisson.json index e6c235317e..861873ffa2 100644 --- a/clients/java/github.com/mrniko/redisson.json +++ b/clients/java/github.com/mrniko/redisson.json @@ -5,4 +5,4 @@ "twitter": [ "mrniko" ] -} \ No newline at end of file +} diff --git a/clients/java/github.com/redis/jedis.json b/clients/java/github.com/redis/jedis.json index 356a16aa42..f7324174e9 100644 --- a/clients/java/github.com/redis/jedis.json +++ b/clients/java/github.com/redis/jedis.json @@ -1,9 +1,9 @@ { "name": "Jedis", "description": "A blazingly small and sane Redis Java client", - "recommended": true, + "official": true, "twitter": [ "xetorthio", "g_korland" ] -} \ No newline at end of file +} diff --git a/clients/kotlin/github.com/domgew/kedis.json b/clients/kotlin/github.com/domgew/kedis.json new file mode 100644 index 0000000000..a620f65a5c --- /dev/null +++ b/clients/kotlin/github.com/domgew/kedis.json @@ -0,0 +1,4 @@ +{ + "name": "Kedis", + "description": "Redis client library for Kotlin Multiplatform (JVM + Native)" +} diff --git a/clients/nodejs/github.com/AWS/GLIDE-for-Redis.json b/clients/nodejs/github.com/AWS/GLIDE-for-Redis.json new file mode 100644 index 0000000000..cfb35fce02 --- /dev/null +++ b/clients/nodejs/github.com/AWS/GLIDE-for-Redis.json @@ -0,0 +1,4 @@ +{ + "name": "GLIDE for Redis", + "description": 
"General Language Independent Driver for the Enterprise (GLIDE) for Redis is an advanced multi-language Redis client that is feature rich, highly performant, and built for reliability and operational stability. GLIDE for Redis is supported by AWS." +} diff --git a/clients/nodejs/github.com/anchovycation/metronom.json b/clients/nodejs/github.com/anchovycation/metronom.json new file mode 100644 index 0000000000..af473fdda0 --- /dev/null +++ b/clients/nodejs/github.com/anchovycation/metronom.json @@ -0,0 +1,8 @@ +{ + "name": "metronom", + "description": "User friendly Redis ORM for Node.js with asynchronous and TypeScript support.", + "homepage": "https://anchovycation.github.io/metronom/", + "twitter": [ + "saracaIihan" + ] +} diff --git a/clients/nodejs/github.com/redis/node-redis.json b/clients/nodejs/github.com/redis/node-redis.json index c92544ab8c..7caa592234 100644 --- a/clients/nodejs/github.com/redis/node-redis.json +++ b/clients/nodejs/github.com/redis/node-redis.json @@ -1,5 +1,5 @@ { "name": "node-redis", "description": "Recommended client for node.", - "recommended": true -} \ No newline at end of file + "official": true +} diff --git a/clients/perl/github.com/plainbanana/Redis-Cluster-Fast.json b/clients/perl/github.com/plainbanana/Redis-Cluster-Fast.json new file mode 100644 index 0000000000..c23ba9a1a0 --- /dev/null +++ b/clients/perl/github.com/plainbanana/Redis-Cluster-Fast.json @@ -0,0 +1,8 @@ +{ + "name": "Redis::Cluster::Fast", + "description": "A fast Perl binding for Redis Cluster", + "homepage": "http://search.cpan.org/dist/Redis-Cluster-Fast/", + "twitter": [ + "plainbanana" + ] +} \ No newline at end of file diff --git a/clients/php/github.com/jdp/redisent.json b/clients/php/github.com/jdp/redisent.json deleted file mode 100644 index 05614fbee7..0000000000 --- 
a/clients/php/github.com/jdp/redisent.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "Redisent", - "description": "A Redis client.", - "twitter": [ - "justinpoliey" - ] -} diff --git a/clients/php/github.com/swoole/redis-async.json b/clients/php/github.com/swoole/redis-async.json deleted file mode 100644 index cf010cb66f..0000000000 --- a/clients/php/github.com/swoole/redis-async.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "name": "redis-async", - "description": "Asynchronous redis client library for PHP." -} \ No newline at end of file diff --git a/clients/python/github.com/AWS/GLIDE-for-Redis.json b/clients/python/github.com/AWS/GLIDE-for-Redis.json new file mode 100644 index 0000000000..cfb35fce02 --- /dev/null +++ b/clients/python/github.com/AWS/GLIDE-for-Redis.json @@ -0,0 +1,4 @@ +{ + "name": "GLIDE for Redis", + "description": "General Language Independent Driver for the Enterprise (GLIDE) for Redis is an advanced multi-language Redis client that is feature rich, highly performant, and built for reliability and operational stability. GLIDE for Redis is supported by AWS." 
+} diff --git a/clients/python/github.com/DriverX/aioredis-cluster.json b/clients/python/github.com/DriverX/aioredis-cluster.json new file mode 100644 index 0000000000..c23469ec2d --- /dev/null +++ b/clients/python/github.com/DriverX/aioredis-cluster.json @@ -0,0 +1,4 @@ +{ + "name": "aioredis-cluster", + "description": "Redis Cluster client implementation based on aioredis v1.x.x" +} diff --git a/clients/python/github.com/redis/redis-py.json b/clients/python/github.com/redis/redis-py.json index e36a228c5c..68dd999ec1 100644 --- a/clients/python/github.com/redis/redis-py.json +++ b/clients/python/github.com/redis/redis-py.json @@ -1,5 +1,5 @@ { "name": "redis-py", - "description": "Mature and supported. Currently the way to go for Python.", - "recommended": true -} \ No newline at end of file + "description": "Mature and supported. The way to go for Python.", + "official": true +} diff --git a/clients/ruby/github.com/etehtsea/oxblood.json b/clients/ruby/github.com/etehtsea/oxblood.json deleted file mode 100644 index 642946babc..0000000000 --- a/clients/ruby/github.com/etehtsea/oxblood.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "oxblood", - "description": "Straightforward Ruby client.", - "twitter": [ - "etehtsea" - ] -} \ No newline at end of file diff --git a/clients/ruby/github.com/redis-rb/redis-client.json b/clients/ruby/github.com/redis-rb/redis-client.json new file mode 100644 index 0000000000..06eb81aca8 --- /dev/null +++ b/clients/ruby/github.com/redis-rb/redis-client.json @@ -0,0 +1,4 @@ +{ + "name": "redis-client", + "description": "Simple low level client for Redis 6+" +} diff --git a/clients/ruby/github.com/redis-rb/redis-cluster-client.json b/clients/ruby/github.com/redis-rb/redis-cluster-client.json new file mode 100644 index 
0000000000..bed2139126 --- /dev/null +++ b/clients/ruby/github.com/redis-rb/redis-cluster-client.json @@ -0,0 +1,4 @@ +{ + "name": "redis-cluster-client", + "description": "A simple client for Redis 6+ cluster" +} diff --git a/clients/rust/github.com/dahomey-technologies/rustis.json b/clients/rust/github.com/dahomey-technologies/rustis.json new file mode 100644 index 0000000000..df88e4ecf2 --- /dev/null +++ b/clients/rust/github.com/dahomey-technologies/rustis.json @@ -0,0 +1,4 @@ +{ + "name": "rustis", + "description": "An asynchronous Redis client for Rust." +} diff --git a/clients/scala/github.com/jodersky/redicl.json b/clients/scala/github.com/jodersky/redicl.json new file mode 100644 index 0000000000..ffd4d45e88 --- /dev/null +++ b/clients/scala/github.com/jodersky/redicl.json @@ -0,0 +1,5 @@ +{ + "name": "redicl", + "description": "A lean and mean redis client implementation that uses only the Scala standard library. 
Available for the JVM and native.", + "homepage": "https://github.com/jodersky/redicl" +} diff --git a/clients/scala/github.com/twitter/finagle/tree/develop/finagle-redis.json b/clients/scala/github.com/twitter/finagle.json similarity index 65% rename from clients/scala/github.com/twitter/finagle/tree/develop/finagle-redis.json rename to clients/scala/github.com/twitter/finagle.json index 031dc0d80e..d85604ce4c 100644 --- a/clients/scala/github.com/twitter/finagle/tree/develop/finagle-redis.json +++ b/clients/scala/github.com/twitter/finagle.json @@ -1,4 +1,4 @@ { - "name": "finagle-redis", + "name": "finagle", "description": "Redis client based on Finagle" } \ No newline at end of file diff --git a/clients/smalltalk/github.com/RediStick/RediStick.json b/clients/smalltalk/github.com/mumez/RediStick.json similarity index 100% rename from clients/smalltalk/github.com/RediStick/RediStick.json rename to clients/smalltalk/github.com/mumez/RediStick.json diff --git a/clients/smalltalk/github.com/SimpleRedisClient/SimpleRedisClient.json b/clients/smalltalk/github.com/svenvc/SimpleRedisClient.json similarity index 100% rename from clients/smalltalk/github.com/SimpleRedisClient/SimpleRedisClient.json rename to clients/smalltalk/github.com/svenvc/SimpleRedisClient.json diff --git a/clients/smalltalk/github.com/Pharo-Redis/Pharo-Redis.json b/clients/smalltalk/github.com/tblanchard/Pharo-Redis.json similarity index 80% rename from clients/smalltalk/github.com/Pharo-Redis/Pharo-Redis.json rename to clients/smalltalk/github.com/tblanchard/Pharo-Redis.json index e8d53bac52..cbd1afb89a 100644 --- a/clients/smalltalk/github.com/Pharo-Redis/Pharo-Redis.json +++ 
b/clients/smalltalk/github.com/tblanchard/Pharo-Redis.json @@ -1,7 +1,6 @@ { "name": "Pharo-Redis", "language": "Smalltalk", - "repository": "https://github.com/tblanchard/Pharo-Redis", "description": "A full featured Redis client for Pharo. This was forked from svenvc/SimpleRedisClient and that simple client is still at the center of this.", "authors": [ "ToddBlanchard10" diff --git a/clients/smalltalk/squeaksource.com/Redis/Redis.json b/clients/smalltalk/squeaksource.com/Redis/Redis.json deleted file mode 100644 index 8868bdfa4d..0000000000 --- a/clients/smalltalk/squeaksource.com/Redis/Redis.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "Redis", - "language": "Smalltalk", - "repository": "http://www.squeaksource.com/Redis", - "description": "Smalltalk Redis client." -} \ No newline at end of file diff --git a/clients/smalltalk/squeaksource.com/RedisBinaryClient/RedisBinaryClient.json b/clients/smalltalk/squeaksource.com/RedisBinaryClient/RedisBinaryClient.json deleted file mode 100644 index c78a59da11..0000000000 --- a/clients/smalltalk/squeaksource.com/RedisBinaryClient/RedisBinaryClient.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "RedisBinaryClient", - "language": "Smalltalk", - "repository": "http://www.squeaksource.com/RedisBinaryClient/", - "description": "A fork of the RedisConnection from Redis Client." -} \ No newline at end of file diff --git a/clients/swift/github.com/michaelvanstraten/Swifty-Redis.json b/clients/swift/github.com/michaelvanstraten/Swifty-Redis.json new file mode 100644 index 0000000000..ddf86c957f --- /dev/null +++ b/clients/swift/github.com/michaelvanstraten/Swifty-Redis.json @@ -0,0 +1,5 @@ +{ + "name": "SwiftyRedis", + "description": "SwiftyRedis is a high level async redis library for Swift. 
", + "homepage": "https://michaelvanstraten.github.io/swifty-redis/documentation/swiftyredis/" +} diff --git a/clients/tcl/github.com/redis/redis/blob/unstable/tests/support/redis.tcl.json b/clients/tcl/github.com/redis/redis.json similarity index 100% rename from clients/tcl/github.com/redis/redis/blob/unstable/tests/support/redis.tcl.json rename to clients/tcl/github.com/redis/redis.json diff --git a/commands.json b/commands.json index 978e128f5b..63637edabd 100644 --- a/commands.json +++ b/commands.json @@ -1,6 +1,6 @@ { "ACL": { - "summary": "A container for Access List Control commands ", + "summary": "A container for Access List Control commands.", "since": "6.0.0", "group": "server", "complexity": "Depends on subcommand.", @@ -10,7 +10,7 @@ "arity": -2 }, "ACL CAT": { - "summary": "List the ACL categories or the commands inside a category", + "summary": "Lists the ACL categories, or the commands inside a category.", "since": "6.0.0", "group": "server", "complexity": "O(1) since the categories and commands are a fixed set.", @@ -20,8 +20,9 @@ "arity": -2, "arguments": [ { - "name": "categoryname", + "name": "category", "type": "string", + "display_text": "category", "optional": true } ], @@ -32,7 +33,7 @@ ] }, "ACL DELUSER": { - "summary": "Remove the specified ACL users and the associated rules", + "summary": "Deletes ACL users, and terminates their connections.", "since": "6.0.0", "group": "server", "complexity": "O(1) amortized time considering the typical user.", @@ -46,6 +47,7 @@ { "name": "username", "type": "string", + "display_text": "username", "multiple": true } ], @@ -54,10 +56,14 @@ "noscript", "loading", "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" ] }, "ACL DRYRUN": { - "summary": "Returns whether the user can execute the given command without executing the command.", + "summary": "Simulates the execution of a command by a user, without executing 
the command.", "since": "7.0.0", "group": "server", "complexity": "O(1).", @@ -70,15 +76,18 @@ "arguments": [ { "name": "username", - "type": "string" + "type": "string", + "display_text": "username" }, { "name": "command", - "type": "string" + "type": "string", + "display_text": "command" }, { "name": "arg", "type": "string", + "display_text": "arg", "optional": true, "multiple": true } @@ -91,7 +100,7 @@ ] }, "ACL GENPASS": { - "summary": "Generate a pseudorandom secure password to use for ACL users", + "summary": "Generates a pseudorandom, secure password that can be used to identify ACL users.", "since": "6.0.0", "group": "server", "complexity": "O(1)", @@ -103,6 +112,7 @@ { "name": "bits", "type": "integer", + "display_text": "bits", "optional": true } ], @@ -113,7 +123,7 @@ ] }, "ACL GETUSER": { - "summary": "Get the rules for a specific ACL user", + "summary": "Lists the ACL rules of a user.", "since": "6.0.0", "group": "server", "complexity": "O(N). Where N is the number of password, command and pattern rules that the user has.", @@ -136,7 +146,8 @@ "arguments": [ { "name": "username", - "type": "string" + "type": "string", + "display_text": "username" } ], "command_flags": [ @@ -147,7 +158,7 @@ ] }, "ACL HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "6.0.0", "group": "server", "complexity": "O(1)", @@ -161,7 +172,7 @@ ] }, "ACL LIST": { - "summary": "List the current ACL rules in ACL config file format", + "summary": "Dumps the effective rules in ACL file format.", "since": "6.0.0", "group": "server", "complexity": "O(N). Where N is the number of configured users.", @@ -179,7 +190,7 @@ ] }, "ACL LOAD": { - "summary": "Reload the ACLs from the configured ACL file", + "summary": "Reloads the rules from the configured ACL file.", "since": "6.0.0", "group": "server", "complexity": "O(N). 
Where N is the number of configured users.", @@ -197,10 +208,16 @@ ] }, "ACL LOG": { - "summary": "List latest events denied because of ACLs in place", + "summary": "Lists recent security events generated due to ACL rules.", "since": "6.0.0", "group": "server", "complexity": "O(N) with N being the number of entries shown.", + "history": [ + [ + "7.2.0", + "Added entry ID, timestamp created, and timestamp last updated." + ] + ], "acl_categories": [ "@admin", "@slow", @@ -215,11 +232,13 @@ "arguments": [ { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" }, { "name": "reset", "type": "pure-token", + "display_text": "reset", "token": "RESET" } ] @@ -233,7 +252,7 @@ ] }, "ACL SAVE": { - "summary": "Save the current ACL rules in the configured ACL file", + "summary": "Saves the effective ACL rules in the configured ACL file.", "since": "6.0.0", "group": "server", "complexity": "O(N). Where N is the number of configured users.", @@ -248,10 +267,14 @@ "noscript", "loading", "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" ] }, "ACL SETUSER": { - "summary": "Modify or create the rules for a specific ACL user", + "summary": "Creates and modifies an ACL user and its rules.", "since": "6.0.0", "group": "server", "complexity": "O(N). Where N is the number of rules provided.", @@ -274,11 +297,13 @@ "arguments": [ { "name": "username", - "type": "string" + "type": "string", + "display_text": "username" }, { "name": "rule", "type": "string", + "display_text": "rule", "optional": true, "multiple": true } @@ -288,10 +313,14 @@ "noscript", "loading", "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" ] }, "ACL USERS": { - "summary": "List the username of all the configured ACL rules", + "summary": "Lists all ACL users.", "since": "6.0.0", "group": "server", "complexity": "O(N). 
Where N is the number of configured users.", @@ -309,7 +338,7 @@ ] }, "ACL WHOAMI": { - "summary": "Return the name of the user associated to the current connection", + "summary": "Returns the authenticated username of the current connection.", "since": "6.0.0", "group": "server", "complexity": "O(1)", @@ -324,7 +353,7 @@ ] }, "APPEND": { - "summary": "Append a value to a key", + "summary": "Appends a string to the value of a key. Creates the key if it doesn't exist.", "since": "2.0.0", "group": "string", "complexity": "O(1). The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation.", @@ -358,11 +387,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ @@ -372,7 +403,7 @@ ] }, "ASKING": { - "summary": "Sent by cluster clients after an -ASK redirect", + "summary": "Signals that a cluster client is following an -ASK redirect.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -386,7 +417,7 @@ ] }, "AUTH": { - "summary": "Authenticate to the server", + "summary": "Authenticates the connection.", "since": "1.0.0", "group": "connection", "complexity": "O(N) where N is the number of passwords defined for the user", @@ -405,12 +436,14 @@ { "name": "username", "type": "string", + "display_text": "username", "since": "6.0.0", "optional": true }, { "name": "password", - "type": "string" + "type": "string", + "display_text": "password" } ], "command_flags": [ @@ -423,7 +456,7 @@ ] }, "BGREWRITEAOF": { - "summary": "Asynchronously rewrite the append-only file", + "summary": "Asynchronously rewrites the append-only file to disk.", "since": "1.0.0", "group": "server", "complexity": "O(1)", @@ -440,7 +473,7 @@ ] }, "BGSAVE": { - "summary": "Asynchronously save 
the dataset to disk", + "summary": "Asynchronously saves the database(s) to disk.", "since": "1.0.0", "group": "server", "complexity": "O(1)", @@ -460,6 +493,7 @@ { "name": "schedule", "type": "pure-token", + "display_text": "schedule", "token": "SCHEDULE", "since": "3.2.2", "optional": true @@ -472,7 +506,7 @@ ] }, "BITCOUNT": { - "summary": "Count set bits in a string", + "summary": "Counts the number of set bits (population counting) in a string.", "since": "2.6.0", "group": "bitmap", "complexity": "O(N)", @@ -512,23 +546,26 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "index", + "name": "range", "type": "block", "optional": true, "arguments": [ { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "end", - "type": "integer" + "type": "integer", + "display_text": "end" }, { - "name": "index_unit", + "name": "unit", "type": "oneof", "since": "7.0.0", "optional": true, @@ -536,11 +573,13 @@ { "name": "byte", "type": "pure-token", + "display_text": "byte", "token": "BYTE" }, { "name": "bit", "type": "pure-token", + "display_text": "bit", "token": "BIT" } ] @@ -553,7 +592,7 @@ ] }, "BITFIELD": { - "summary": "Perform arbitrary bitfield integer operations on strings", + "summary": "Performs arbitrary bitfield integer operations on strings.", "since": "3.2.0", "group": "bitmap", "complexity": "O(1) for each subcommand specified", @@ -590,25 +629,29 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "operation", "type": "oneof", + "optional": true, "multiple": true, "arguments": [ { - "name": "encoding_offset", + "name": "get-block", "type": "block", "token": "GET", "arguments": [ { "name": "encoding", - "type": "string" + "type": "string", + "display_text": "encoding" }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" } ] }, @@ -617,7 +660,7 @@ "type": "block", "arguments": [ { - "name": 
"wrap_sat_fail", + "name": "overflow-block", "type": "oneof", "token": "OVERFLOW", "optional": true, @@ -625,59 +668,68 @@ { "name": "wrap", "type": "pure-token", + "display_text": "wrap", "token": "WRAP" }, { "name": "sat", "type": "pure-token", + "display_text": "sat", "token": "SAT" }, { "name": "fail", "type": "pure-token", + "display_text": "fail", "token": "FAIL" } ] }, { - "name": "write_operation", + "name": "write-operation", "type": "oneof", "arguments": [ { - "name": "encoding_offset_value", + "name": "set-block", "type": "block", "token": "SET", "arguments": [ { "name": "encoding", - "type": "string" + "type": "string", + "display_text": "encoding" }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "value", - "type": "integer" + "type": "integer", + "display_text": "value" } ] }, { - "name": "encoding_offset_increment", + "name": "incrby-block", "type": "block", "token": "INCRBY", "arguments": [ { "name": "encoding", - "type": "string" + "type": "string", + "display_text": "encoding" }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "increment", - "type": "integer" + "type": "integer", + "display_text": "increment" } ] } @@ -694,7 +746,7 @@ ] }, "BITFIELD_RO": { - "summary": "Perform arbitrary bitfield integer operations on strings. 
Read-only variant of BITFIELD", + "summary": "Performs arbitrary read-only bitfield integer operations on strings.", "since": "6.0.0", "group": "bitmap", "complexity": "O(1) for each subcommand specified", @@ -728,22 +780,26 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "encoding_offset", + "name": "get-block", "type": "block", "token": "GET", + "optional": true, "multiple": true, "multiple_token": true, "arguments": [ { "name": "encoding", - "type": "string" + "type": "string", + "display_text": "encoding" }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" } ] } @@ -754,7 +810,7 @@ ] }, "BITOP": { - "summary": "Perform bitwise operations between strings", + "summary": "Performs bitwise operations on multiple strings, and stores the result.", "since": "2.6.0", "group": "bitmap", "complexity": "O(N)", @@ -805,16 +861,44 @@ "arguments": [ { "name": "operation", - "type": "string" + "type": "oneof", + "arguments": [ + { + "name": "and", + "type": "pure-token", + "display_text": "and", + "token": "AND" + }, + { + "name": "or", + "type": "pure-token", + "display_text": "or", + "token": "OR" + }, + { + "name": "xor", + "type": "pure-token", + "display_text": "xor", + "token": "XOR" + }, + { + "name": "not", + "type": "pure-token", + "display_text": "not", + "token": "NOT" + } + ] }, { "name": "destkey", "type": "key", + "display_text": "destkey", "key_spec_index": 0 }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true } @@ -825,7 +909,7 @@ ] }, "BITPOS": { - "summary": "Find first bit set or clear in a string", + "summary": "Finds the first set (1) or clear (0) bit in a string.", "since": "2.8.7", "group": "bitmap", "complexity": "O(N)", @@ -865,32 +949,36 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "bit", - "type": "integer" + "type": "integer", + "display_text": "bit" }, { - "name": 
"index", + "name": "range", "type": "block", "optional": true, "arguments": [ { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { - "name": "end_index", + "name": "end-unit-block", "type": "block", "optional": true, "arguments": [ { "name": "end", - "type": "integer" + "type": "integer", + "display_text": "end" }, { - "name": "index_unit", + "name": "unit", "type": "oneof", "since": "7.0.0", "optional": true, @@ -898,11 +986,13 @@ { "name": "byte", "type": "pure-token", + "display_text": "byte", "token": "BYTE" }, { "name": "bit", "type": "pure-token", + "display_text": "bit", "token": "BIT" } ] @@ -917,7 +1007,7 @@ ] }, "BLMOVE": { - "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", + "summary": "Pops an element from a list, pushes it to another list and returns it. Blocks until an element is available otherwise. Deletes the list if the last element was moved.", "since": "6.2.0", "group": "list", "complexity": "O(1)", @@ -971,11 +1061,13 @@ { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 0 }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 1 }, { @@ -985,11 +1077,13 @@ { "name": "left", "type": "pure-token", + "display_text": "left", "token": "LEFT" }, { "name": "right", "type": "pure-token", + "display_text": "right", "token": "RIGHT" } ] @@ -1001,29 +1095,31 @@ { "name": "left", "type": "pure-token", + "display_text": "left", "token": "LEFT" }, { "name": "right", "type": "pure-token", + "display_text": "right", "token": "RIGHT" } ] }, { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" } ], "command_flags": [ "write", "denyoom", - "noscript", "blocking" ] }, "BLMPOP": { - "summary": "Pop elements from a list, or block until one is available", + "summary": "Pops the first element from one of multiple lists. 
Blocks until an element is available otherwise. Deletes the list if the last element was popped.", "since": "7.0.0", "group": "list", "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", @@ -1058,15 +1154,18 @@ "arguments": [ { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, @@ -1077,11 +1176,13 @@ { "name": "left", "type": "pure-token", + "display_text": "left", "token": "LEFT" }, { "name": "right", "type": "pure-token", + "display_text": "right", "token": "RIGHT" } ] @@ -1089,6 +1190,7 @@ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -1100,7 +1202,7 @@ ] }, "BLPOP": { - "summary": "Remove and get the first element in a list, or block until one is available", + "summary": "Removes and returns the first element in a list. Blocks until an element is available otherwise. Deletes the list if the last element was popped.", "since": "2.0.0", "group": "list", "complexity": "O(N) where N is the number of provided keys.", @@ -1142,22 +1244,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" } ], "command_flags": [ "write", - "noscript", "blocking" ] }, "BRPOP": { - "summary": "Remove and get the last element in a list, or block until one is available", + "summary": "Removes and returns the last element in a list. Blocks until an element is available otherwise. 
Deletes the list if the last element was popped.", "since": "2.0.0", "group": "list", "complexity": "O(N) where N is the number of provided keys.", @@ -1199,22 +1302,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" } ], "command_flags": [ "write", - "noscript", "blocking" ] }, "BRPOPLPUSH": { - "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", + "summary": "Pops an element from a list, pushes it to another list and returns it. Block until an element is available otherwise. Deletes the list if the last element was popped.", "since": "2.2.0", "group": "list", "complexity": "O(1)", @@ -1276,22 +1380,24 @@ { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 0 }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 1 }, { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" } ], "command_flags": [ "write", "denyoom", - "noscript", "blocking" ], "doc_flags": [ @@ -1299,10 +1405,10 @@ ] }, "BZMPOP": { - "summary": "Remove and return members with scores in a sorted set or block until one is available", + "summary": "Removes and returns a member by score from one or more sorted sets. Blocks until a member is available otherwise. 
Deletes the sorted set if the last element was popped.", "since": "7.0.0", "group": "sorted-set", - "complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", + "complexity": "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", "acl_categories": [ "@write", "@sortedset", @@ -1334,15 +1440,18 @@ "arguments": [ { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, @@ -1353,11 +1462,13 @@ { "name": "min", "type": "pure-token", + "display_text": "min", "token": "MIN" }, { "name": "max", "type": "pure-token", + "display_text": "max", "token": "MAX" } ] @@ -1365,6 +1476,7 @@ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -1376,7 +1488,7 @@ ] }, "BZPOPMAX": { - "summary": "Remove and return the member with the highest score from one or more sorted sets, or block until one is available", + "summary": "Removes and returns the member with the highest score from one or more sorted sets. Blocks until a member available otherwise. 
Deletes the sorted set if the last element was popped.", "since": "5.0.0", "group": "sorted-set", "complexity": "O(log(N)) with N being the number of elements in the sorted set.", @@ -1418,23 +1530,24 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" } ], "command_flags": [ "write", - "noscript", "blocking", "fast" ] }, "BZPOPMIN": { - "summary": "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available", + "summary": "Removes and returns the member with the lowest score from one or more sorted sets. Blocks until a member is available otherwise. Deletes the sorted set if the last element was popped.", "since": "5.0.0", "group": "sorted-set", "complexity": "O(log(N)) with N being the number of elements in the sorted set.", @@ -1476,23 +1589,24 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "timeout", - "type": "double" + "type": "double", + "display_text": "timeout" } ], "command_flags": [ "write", - "noscript", "blocking", "fast" ] }, "CLIENT": { - "summary": "A container for client connection commands", + "summary": "A container for client connection commands.", "since": "2.4.0", "group": "connection", "complexity": "Depends on subcommand.", @@ -1502,7 +1616,7 @@ "arity": -2 }, "CLIENT CACHING": { - "summary": "Instruct the server about tracking or not keys in the next request", + "summary": "Instructs the server whether to track the keys in the next request.", "since": "6.0.0", "group": "connection", "complexity": "O(1)", @@ -1519,11 +1633,13 @@ { "name": "yes", "type": "pure-token", + "display_text": "yes", "token": "YES" }, { "name": "no", "type": "pure-token", + "display_text": "no", "token": "NO" } ] @@ -1536,7 +1652,7 @@ ] }, "CLIENT GETNAME": { - "summary": "Get the current connection name", + 
"summary": "Returns the name of the connection.", "since": "2.6.9", "group": "connection", "complexity": "O(1)", @@ -1552,7 +1668,7 @@ ] }, "CLIENT GETREDIR": { - "summary": "Get tracking notifications redirection client ID if any", + "summary": "Returns the client ID to which the connection's tracking notifications are redirected.", "since": "6.0.0", "group": "connection", "complexity": "O(1)", @@ -1568,7 +1684,7 @@ ] }, "CLIENT HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": "connection", "complexity": "O(1)", @@ -1583,7 +1699,7 @@ ] }, "CLIENT ID": { - "summary": "Returns the client ID for the current connection", + "summary": "Returns the unique client ID of the connection.", "since": "5.0.0", "group": "connection", "complexity": "O(1)", @@ -1599,7 +1715,7 @@ ] }, "CLIENT INFO": { - "summary": "Returns information about the current client connection.", + "summary": "Returns information about the connection.", "since": "6.2.0", "group": "connection", "complexity": "O(1)", @@ -1618,7 +1734,7 @@ ] }, "CLIENT KILL": { - "summary": "Kill the connection of a client", + "summary": "Terminates open connections.", "since": "2.4.0", "group": "connection", "complexity": "O(N) where N is the number of client connections", @@ -1653,77 +1769,114 @@ "arity": -3, "arguments": [ { - "name": "ip:port", - "type": "string", - "optional": true - }, - { - "name": "client-id", - "type": "integer", - "token": "ID", - "since": "2.8.12", - "optional": true - }, - { - "name": "normal_master_slave_pubsub", + "name": "filter", "type": "oneof", - "token": "TYPE", - "since": "2.8.12", - "optional": true, "arguments": [ { - "name": "normal", - "type": "pure-token", - "token": "NORMAL" - }, - { - "name": "master", - "type": "pure-token", - "token": "MASTER", - "since": "3.2.0" - }, - { - "name": "slave", - "type": "pure-token", - "token": "SLAVE" - }, - { - "name": 
"replica", - "type": "pure-token", - "token": "REPLICA", - "since": "5.0.0" + "name": "old-format", + "type": "string", + "display_text": "ip:port", + "deprecated_since": "2.8.12" }, { - "name": "pubsub", - "type": "pure-token", - "token": "PUBSUB" + "name": "new-format", + "type": "oneof", + "multiple": true, + "arguments": [ + { + "name": "client-id", + "type": "integer", + "display_text": "client-id", + "token": "ID", + "since": "2.8.12", + "optional": true + }, + { + "name": "client-type", + "type": "oneof", + "token": "TYPE", + "since": "2.8.12", + "optional": true, + "arguments": [ + { + "name": "normal", + "type": "pure-token", + "display_text": "normal", + "token": "NORMAL" + }, + { + "name": "master", + "type": "pure-token", + "display_text": "master", + "token": "MASTER", + "since": "3.2.0" + }, + { + "name": "slave", + "type": "pure-token", + "display_text": "slave", + "token": "SLAVE" + }, + { + "name": "replica", + "type": "pure-token", + "display_text": "replica", + "token": "REPLICA", + "since": "5.0.0" + }, + { + "name": "pubsub", + "type": "pure-token", + "display_text": "pubsub", + "token": "PUBSUB" + } + ] + }, + { + "name": "username", + "type": "string", + "display_text": "username", + "token": "USER", + "optional": true + }, + { + "name": "addr", + "type": "string", + "display_text": "ip:port", + "token": "ADDR", + "optional": true + }, + { + "name": "laddr", + "type": "string", + "display_text": "ip:port", + "token": "LADDR", + "since": "6.2.0", + "optional": true + }, + { + "name": "skipme", + "type": "oneof", + "token": "SKIPME", + "optional": true, + "arguments": [ + { + "name": "yes", + "type": "pure-token", + "display_text": "yes", + "token": "YES" + }, + { + "name": "no", + "type": "pure-token", + "display_text": "no", + "token": "NO" + } + ] + } + ] } ] - }, - { - "name": "username", - "type": "string", - "token": "USER", - "optional": true - }, - { - "name": "ip:port", - "type": "string", - "token": "ADDR", - "optional": true - }, - { 
- "name": "ip:port", - "type": "string", - "token": "LADDR", - "since": "6.2.0", - "optional": true - }, - { - "name": "yes/no", - "type": "string", - "token": "SKIPME", - "optional": true } ], "command_flags": [ @@ -1734,7 +1887,7 @@ ] }, "CLIENT LIST": { - "summary": "Get the list of client connections", + "summary": "Lists open connections.", "since": "2.4.0", "group": "connection", "complexity": "O(N) where N is the number of client connections", @@ -1747,9 +1900,21 @@ "5.0.0", "Added optional `TYPE` filter." ], + [ + "6.0.0", + "Added `user` field." + ], [ "6.2.0", - "Added `laddr` field and the optional `ID` filter." + "Added `argv-mem`, `tot-mem`, `laddr` and `redir` fields and the optional `ID` filter." + ], + [ + "7.0.0", + "Added `resp`, `multi-mem`, `rbs` and `rbp` fields." + ], + [ + "7.0.3", + "Added `ssub` field." ] ], "acl_categories": [ @@ -1761,7 +1926,7 @@ "arity": -2, "arguments": [ { - "name": "normal_master_replica_pubsub", + "name": "client-type", "type": "oneof", "token": "TYPE", "since": "5.0.0", @@ -1770,38 +1935,37 @@ { "name": "normal", "type": "pure-token", + "display_text": "normal", "token": "NORMAL" }, { "name": "master", "type": "pure-token", + "display_text": "master", "token": "MASTER" }, { "name": "replica", "type": "pure-token", + "display_text": "replica", "token": "REPLICA" }, { "name": "pubsub", "type": "pure-token", + "display_text": "pubsub", "token": "PUBSUB" } ] }, { - "name": "id", - "type": "block", + "name": "client-id", + "type": "integer", + "display_text": "client-id", "token": "ID", "since": "6.2.0", "optional": true, - "arguments": [ - { - "name": "client-id", - "type": "integer", - "multiple": true - } - ] + "multiple": true } ], "command_flags": [ @@ -1815,7 +1979,7 @@ ] }, "CLIENT NO-EVICT": { - "summary": "Set client eviction mode for the current connection", + "summary": "Sets the client eviction mode of the connection.", "since": "7.0.0", "group": "connection", "complexity": "O(1)", @@ -1834,11 +1998,13 @@ { 
"name": "on", "type": "pure-token", + "display_text": "on", "token": "ON" }, { "name": "off", "type": "pure-token", + "display_text": "off", "token": "OFF" } ] @@ -1851,9 +2017,45 @@ "stale" ] }, + "CLIENT NO-TOUCH": { + "summary": "Controls whether commands sent by the client affect the LRU/LFU of accessed keys.", + "since": "7.2.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 3, + "arguments": [ + { + "name": "enabled", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "display_text": "on", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "display_text": "off", + "token": "OFF" + } + ] + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, "CLIENT PAUSE": { - "summary": "Stop processing commands from clients for some time", - "since": "2.9.50", + "summary": "Suspends commands processing.", + "since": "3.0.0", "group": "connection", "complexity": "O(1)", "history": [ @@ -1872,7 +2074,8 @@ "arguments": [ { "name": "timeout", - "type": "integer" + "type": "integer", + "display_text": "timeout" }, { "name": "mode", @@ -1883,11 +2086,13 @@ { "name": "write", "type": "pure-token", + "display_text": "write", "token": "WRITE" }, { "name": "all", "type": "pure-token", + "display_text": "all", "token": "ALL" } ] @@ -1901,7 +2106,7 @@ ] }, "CLIENT REPLY": { - "summary": "Instruct the server whether to reply to commands", + "summary": "Instructs the server whether to reply to commands.", "since": "3.2.0", "group": "connection", "complexity": "O(1)", @@ -1912,22 +2117,25 @@ "arity": 3, "arguments": [ { - "name": "on_off_skip", + "name": "action", "type": "oneof", "arguments": [ { "name": "on", "type": "pure-token", + "display_text": "on", "token": "ON" }, { "name": "off", "type": "pure-token", + "display_text": "off", "token": "OFF" }, { "name": "skip", "type": "pure-token", + "display_text": "skip", "token": "SKIP" } ] @@ -1939,8 
+2147,48 @@ "stale" ] }, + "CLIENT SETINFO": { + "summary": "Sets information specific to the client or connection.", + "since": "7.2.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 4, + "arguments": [ + { + "name": "attr", + "type": "oneof", + "arguments": [ + { + "name": "libname", + "type": "string", + "display_text": "libname", + "token": "LIB-NAME" + }, + { + "name": "libver", + "type": "string", + "display_text": "libver", + "token": "LIB-VER" + } + ] + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" + ] + }, "CLIENT SETNAME": { - "summary": "Set the current connection name", + "summary": "Sets the connection name.", "since": "2.6.9", "group": "connection", "complexity": "O(1)", @@ -1952,17 +2200,22 @@ "arguments": [ { "name": "connection-name", - "type": "string" + "type": "string", + "display_text": "connection-name" } ], "command_flags": [ "noscript", "loading", "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" ] }, "CLIENT TRACKING": { - "summary": "Enable or disable server assisted client side caching support", + "summary": "Controls server-assisted client-side caching for the connection.", "since": "6.0.0", "group": "connection", "complexity": "O(1). 
Some options may introduce additional complexity.", @@ -1979,11 +2232,13 @@ { "name": "on", "type": "pure-token", + "display_text": "on", "token": "ON" }, { "name": "off", "type": "pure-token", + "display_text": "off", "token": "OFF" } ] @@ -1991,12 +2246,14 @@ { "name": "client-id", "type": "integer", + "display_text": "client-id", "token": "REDIRECT", "optional": true }, { "name": "prefix", "type": "string", + "display_text": "prefix", "token": "PREFIX", "optional": true, "multiple": true, @@ -2005,24 +2262,28 @@ { "name": "bcast", "type": "pure-token", + "display_text": "bcast", "token": "BCAST", "optional": true }, { "name": "optin", "type": "pure-token", + "display_text": "optin", "token": "OPTIN", "optional": true }, { "name": "optout", "type": "pure-token", + "display_text": "optout", "token": "OPTOUT", "optional": true }, { "name": "noloop", "type": "pure-token", + "display_text": "noloop", "token": "NOLOOP", "optional": true } @@ -2034,7 +2295,7 @@ ] }, "CLIENT TRACKINGINFO": { - "summary": "Return information about server assisted client side caching for the current connection", + "summary": "Returns information about server-assisted client-side caching for the connection.", "since": "6.2.0", "group": "connection", "complexity": "O(1)", @@ -2050,7 +2311,7 @@ ] }, "CLIENT UNBLOCK": { - "summary": "Unblock a client blocked in a blocking command from a different connection", + "summary": "Unblocks a client blocked by a blocking command from a different connection.", "since": "5.0.0", "group": "connection", "complexity": "O(log N) where N is the number of client connections", @@ -2064,21 +2325,24 @@ "arguments": [ { "name": "client-id", - "type": "integer" + "type": "integer", + "display_text": "client-id" }, { - "name": "timeout_error", + "name": "unblock-type", "type": "oneof", "optional": true, "arguments": [ { "name": "timeout", "type": "pure-token", + "display_text": "timeout", "token": "TIMEOUT" }, { "name": "error", "type": "pure-token", + 
"display_text": "error", "token": "ERROR" } ] @@ -2092,7 +2356,7 @@ ] }, "CLIENT UNPAUSE": { - "summary": "Resume processing of clients that were paused", + "summary": "Resumes processing commands from paused clients.", "since": "6.2.0", "group": "connection", "complexity": "O(N) Where N is the number of paused clients", @@ -2111,7 +2375,7 @@ ] }, "CLUSTER": { - "summary": "A container for cluster commands", + "summary": "A container for Redis Cluster commands.", "since": "3.0.0", "group": "cluster", "complexity": "Depends on subcommand.", @@ -2121,7 +2385,7 @@ "arity": -2 }, "CLUSTER ADDSLOTS": { - "summary": "Assign new hash slots to receiving node", + "summary": "Assigns new hash slots to a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of hash slot arguments", @@ -2135,6 +2399,7 @@ { "name": "slot", "type": "integer", + "display_text": "slot", "multiple": true } ], @@ -2142,13 +2407,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER ADDSLOTSRANGE": { - "summary": "Assign new hash slots to receiving node", + "summary": "Assigns new hash slot ranges to a node.", "since": "7.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of the slots between the start slot and end slot arguments.", @@ -2160,17 +2422,19 @@ "arity": -4, "arguments": [ { - "name": "start-slot_end-slot", + "name": "range", "type": "block", "multiple": true, "arguments": [ { "name": "start-slot", - "type": "integer" + "type": "integer", + "display_text": "start-slot" }, { "name": "end-slot", - "type": "integer" + "type": "integer", + "display_text": "end-slot" } ] } @@ -2179,13 +2443,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER BUMPEPOCH": { - "summary": "Advance the cluster config epoch", + "summary": "Advances the cluster config epoch.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2205,7 +2466,7 @@ ] 
}, "CLUSTER COUNT-FAILURE-REPORTS": { - "summary": "Return the number of failure reports active for a given node", + "summary": "Returns the number of active failure reports active for a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the number of failure reports", @@ -2218,7 +2479,8 @@ "arguments": [ { "name": "node-id", - "type": "string" + "type": "string", + "display_text": "node-id" } ], "command_flags": [ @@ -2230,7 +2492,7 @@ ] }, "CLUSTER COUNTKEYSINSLOT": { - "summary": "Return the number of local keys in the specified hash slot", + "summary": "Returns the number of keys in a hash slot.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2241,18 +2503,16 @@ "arguments": [ { "name": "slot", - "type": "integer" + "type": "integer", + "display_text": "slot" } ], "command_flags": [ "stale" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER DELSLOTS": { - "summary": "Set hash slots as unbound in receiving node", + "summary": "Sets hash slots as unbound for a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of hash slot arguments", @@ -2266,6 +2526,7 @@ { "name": "slot", "type": "integer", + "display_text": "slot", "multiple": true } ], @@ -2273,13 +2534,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER DELSLOTSRANGE": { - "summary": "Set hash slots as unbound in receiving node", + "summary": "Sets hash slot ranges as unbound for a node.", "since": "7.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of the slots between the start slot and end slot arguments.", @@ -2291,17 +2549,19 @@ "arity": -4, "arguments": [ { - "name": "start-slot_end-slot", + "name": "range", "type": "block", "multiple": true, "arguments": [ { "name": "start-slot", - "type": "integer" + "type": "integer", + "display_text": "start-slot" }, { "name": "end-slot", - "type": "integer" + "type": "integer", + "display_text": 
"end-slot" } ] } @@ -2310,9 +2570,6 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER FAILOVER": { @@ -2335,11 +2592,13 @@ { "name": "force", "type": "pure-token", + "display_text": "force", "token": "FORCE" }, { "name": "takeover", "type": "pure-token", + "display_text": "takeover", "token": "TAKEOVER" } ] @@ -2349,13 +2608,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER FLUSHSLOTS": { - "summary": "Delete a node's own slots information", + "summary": "Deletes all slots information from a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2369,13 +2625,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER FORGET": { - "summary": "Remove a node from the nodes table", + "summary": "Removes a node from the nodes table.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2388,23 +2641,21 @@ "arguments": [ { "name": "node-id", - "type": "string" + "type": "string", + "display_text": "node-id" } ], "command_flags": [ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER GETKEYSINSLOT": { - "summary": "Return local key names in the specified hash slot", + "summary": "Returns the key names in a hash slot.", "since": "3.0.0", "group": "cluster", - "complexity": "O(log(N)) where N is the number of requested keys", + "complexity": "O(N) where N is the number of requested keys", "acl_categories": [ "@slow" ], @@ -2412,11 +2663,13 @@ "arguments": [ { "name": "slot", - "type": "integer" + "type": "integer", + "display_text": "slot" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ], "command_flags": [ @@ -2427,7 +2680,7 @@ ] }, "CLUSTER HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": 
"cluster", "complexity": "O(1)", @@ -2441,7 +2694,7 @@ ] }, "CLUSTER INFO": { - "summary": "Provides info about Redis Cluster node state", + "summary": "Returns information about the state of a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2457,7 +2710,7 @@ ] }, "CLUSTER KEYSLOT": { - "summary": "Returns the hash slot of the specified key", + "summary": "Returns the hash slot for a key.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the number of bytes in the key", @@ -2468,18 +2721,16 @@ "arguments": [ { "name": "key", - "type": "string" + "type": "string", + "display_text": "key" } ], "command_flags": [ "stale" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER LINKS": { - "summary": "Returns a list of all TCP links to and from peer nodes in cluster", + "summary": "Returns a list of all TCP links to and from peer nodes.", "since": "7.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of Cluster nodes", @@ -2495,7 +2746,7 @@ ] }, "CLUSTER MEET": { - "summary": "Force a node cluster to handshake with another node", + "summary": "Forces a node to handshake with another node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2514,15 +2765,18 @@ "arguments": [ { "name": "ip", - "type": "string" + "type": "string", + "display_text": "ip" }, { "name": "port", - "type": "integer" + "type": "integer", + "display_text": "port" }, { - "name": "cluster_bus_port", + "name": "cluster-bus-port", "type": "integer", + "display_text": "cluster-bus-port", "since": "4.0.0", "optional": true } @@ -2531,13 +2785,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER MYID": { - "summary": "Return the node id", + "summary": "Returns the ID of a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2545,6 +2796,19 @@ "@slow" ], "arity": 2, + "command_flags": [ + "stale" + ] + }, + "CLUSTER MYSHARDID": { + "summary": "Returns 
the shard ID of a node.", + "since": "7.2.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, "command_flags": [ "stale" ], @@ -2553,7 +2817,7 @@ ] }, "CLUSTER NODES": { - "summary": "Get Cluster config for the node", + "summary": "Returns the cluster configuration for a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of Cluster nodes", @@ -2569,10 +2833,10 @@ ] }, "CLUSTER REPLICAS": { - "summary": "List replica nodes of the specified master node", + "summary": "Lists the replica nodes of a master node.", "since": "5.0.0", "group": "cluster", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of replicas.", "acl_categories": [ "@admin", "@slow", @@ -2582,7 +2846,8 @@ "arguments": [ { "name": "node-id", - "type": "string" + "type": "string", + "display_text": "node-id" } ], "command_flags": [ @@ -2594,7 +2859,7 @@ ] }, "CLUSTER REPLICATE": { - "summary": "Reconfigure a node as a replica of the specified master node", + "summary": "Configure a node as replica of a master node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2607,20 +2872,18 @@ "arguments": [ { "name": "node-id", - "type": "string" + "type": "string", + "display_text": "node-id" } ], "command_flags": [ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER RESET": { - "summary": "Reset a Redis Cluster node", + "summary": "Resets a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the number of known nodes. 
The command may execute a FLUSHALL as a side effect.", @@ -2632,18 +2895,20 @@ "arity": -2, "arguments": [ { - "name": "hard_soft", + "name": "reset-type", "type": "oneof", "optional": true, "arguments": [ { "name": "hard", "type": "pure-token", + "display_text": "hard", "token": "HARD" }, { "name": "soft", "type": "pure-token", + "display_text": "soft", "token": "SOFT" } ] @@ -2653,13 +2918,10 @@ "admin", "noscript", "stale" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER SAVECONFIG": { - "summary": "Forces the node to save cluster state on disk", + "summary": "Forces a node to save the cluster configuration to disk.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2673,13 +2935,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER SET-CONFIG-EPOCH": { - "summary": "Set the configuration epoch in a new node", + "summary": "Sets the configuration epoch for a new node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2692,20 +2951,18 @@ "arguments": [ { "name": "config-epoch", - "type": "integer" + "type": "integer", + "display_text": "config-epoch" } ], "command_flags": [ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER SETSLOT": { - "summary": "Bind a hash slot to a specific node", + "summary": "Binds a hash slot to a node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -2718,30 +2975,35 @@ "arguments": [ { "name": "slot", - "type": "integer" + "type": "integer", + "display_text": "slot" }, { "name": "subcommand", "type": "oneof", "arguments": [ { - "name": "node-id", + "name": "importing", "type": "string", + "display_text": "node-id", "token": "IMPORTING" }, { - "name": "node-id", + "name": "migrating", "type": "string", + "display_text": "node-id", "token": "MIGRATING" }, { - "name": "node-id", + "name": "node", "type": "string", + "display_text": "node-id", "token": "NODE" }, { "name": "stable", 
"type": "pure-token", + "display_text": "stable", "token": "STABLE" } ] @@ -2751,13 +3013,10 @@ "admin", "stale", "no_async_loading" - ], - "hints": [ - "nondeterministic_output" ] }, "CLUSTER SHARDS": { - "summary": "Get array of cluster slots to node mappings", + "summary": "Returns the mapping of cluster slots to shards.", "since": "7.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of cluster nodes", @@ -2766,6 +3025,7 @@ ], "arity": 2, "command_flags": [ + "loading", "stale" ], "hints": [ @@ -2773,10 +3033,10 @@ ] }, "CLUSTER SLAVES": { - "summary": "List replica nodes of the specified master node", + "summary": "Lists the replica nodes of a master node.", "since": "3.0.0", "group": "cluster", - "complexity": "O(1)", + "complexity": "O(N) where N is the number of replicas.", "deprecated_since": "5.0.0", "replaced_by": "`CLUSTER REPLICAS`", "acl_categories": [ @@ -2788,7 +3048,8 @@ "arguments": [ { "name": "node-id", - "type": "string" + "type": "string", + "display_text": "node-id" } ], "command_flags": [ @@ -2803,7 +3064,7 @@ ] }, "CLUSTER SLOTS": { - "summary": "Get array of Cluster slot to node mappings", + "summary": "Returns the mapping of cluster slots to nodes.", "since": "3.0.0", "group": "cluster", "complexity": "O(N) where N is the total number of Cluster nodes", @@ -2824,6 +3085,7 @@ ], "arity": 2, "command_flags": [ + "loading", "stale" ], "doc_flags": [ @@ -2834,7 +3096,7 @@ ] }, "COMMAND": { - "summary": "Get array of Redis command details", + "summary": "Returns detailed information about all commands.", "since": "2.8.13", "group": "server", "complexity": "O(N) where N is the total number of Redis commands", @@ -2852,7 +3114,7 @@ ] }, "COMMAND COUNT": { - "summary": "Get total number of Redis commands", + "summary": "Returns a count of commands.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -2867,7 +3129,7 @@ ] }, "COMMAND DOCS": { - "summary": "Get array of specific Redis command documentation", + 
"summary": "Returns documentary information about one, multiple or all commands.", "since": "7.0.0", "group": "server", "complexity": "O(N) where N is the number of commands to look up", @@ -2880,6 +3142,7 @@ { "name": "command-name", "type": "string", + "display_text": "command-name", "optional": true, "multiple": true } @@ -2893,7 +3156,7 @@ ] }, "COMMAND GETKEYS": { - "summary": "Extract keys given a full Redis command", + "summary": "Extracts the key names from an arbitrary command.", "since": "2.8.13", "group": "server", "complexity": "O(N) where N is the number of arguments to the command", @@ -2901,14 +3164,28 @@ "@slow", "@connection" ], - "arity": -4, + "arity": -3, + "arguments": [ + { + "name": "command", + "type": "string", + "display_text": "command" + }, + { + "name": "arg", + "type": "string", + "display_text": "arg", + "optional": true, + "multiple": true + } + ], "command_flags": [ "loading", "stale" ] }, "COMMAND GETKEYSANDFLAGS": { - "summary": "Extract keys and access flags given a full Redis command", + "summary": "Extracts the key names and access flags for an arbitrary command.", "since": "7.0.0", "group": "server", "complexity": "O(N) where N is the number of arguments to the command", @@ -2916,14 +3193,28 @@ "@slow", "@connection" ], - "arity": -4, + "arity": -3, + "arguments": [ + { + "name": "command", + "type": "string", + "display_text": "command" + }, + { + "name": "arg", + "type": "string", + "display_text": "arg", + "optional": true, + "multiple": true + } + ], "command_flags": [ "loading", "stale" ] }, "COMMAND HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": "server", "complexity": "O(1)", @@ -2938,7 +3229,7 @@ ] }, "COMMAND INFO": { - "summary": "Get array of specific Redis command details, or all when no argument is given.", + "summary": "Returns information about one, multiple or all commands.", "since": 
"2.8.13", "group": "server", "complexity": "O(N) where N is the number of commands to look up", @@ -2957,6 +3248,7 @@ { "name": "command-name", "type": "string", + "display_text": "command-name", "optional": true, "multiple": true } @@ -2970,7 +3262,7 @@ ] }, "COMMAND LIST": { - "summary": "Get an array of Redis command names", + "summary": "Returns a list of command names.", "since": "7.0.0", "group": "server", "complexity": "O(N) where N is the total number of Redis commands", @@ -2989,16 +3281,19 @@ { "name": "module-name", "type": "string", + "display_text": "module-name", "token": "MODULE" }, { "name": "category", "type": "string", + "display_text": "category", "token": "ACLCAT" }, { "name": "pattern", "type": "pattern", + "display_text": "pattern", "token": "PATTERN" } ] @@ -3013,7 +3308,7 @@ ] }, "CONFIG": { - "summary": "A container for server configuration commands", + "summary": "A container for server configuration commands.", "since": "2.0.0", "group": "server", "complexity": "Depends on subcommand.", @@ -3023,7 +3318,7 @@ "arity": -2 }, "CONFIG GET": { - "summary": "Get the values of configuration parameters", + "summary": "Returns the effective values of configuration parameters.", "since": "2.0.0", "group": "server", "complexity": "O(N) when N is the number of configuration parameters provided", @@ -3042,14 +3337,9 @@ "arguments": [ { "name": "parameter", - "type": "block", - "multiple": true, - "arguments": [ - { - "name": "parameter", - "type": "string" - } - ] + "type": "string", + "display_text": "parameter", + "multiple": true } ], "command_flags": [ @@ -3060,7 +3350,7 @@ ] }, "CONFIG HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": "server", "complexity": "O(1)", @@ -3074,7 +3364,7 @@ ] }, "CONFIG RESETSTAT": { - "summary": "Reset the stats returned by INFO", + "summary": "Resets the server's statistics.", "since": "2.0.0", 
"group": "server", "complexity": "O(1)", @@ -3089,10 +3379,14 @@ "noscript", "loading", "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" ] }, "CONFIG REWRITE": { - "summary": "Rewrite the configuration file with the in memory configuration", + "summary": "Persists the effective configuration to file.", "since": "2.8.0", "group": "server", "complexity": "O(1)", @@ -3107,10 +3401,14 @@ "noscript", "loading", "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" ] }, "CONFIG SET": { - "summary": "Set configuration parameters to the given values", + "summary": "Sets configuration parameters in-flight.", "since": "2.0.0", "group": "server", "complexity": "O(N) when N is the number of configuration parameters provided", @@ -3128,17 +3426,19 @@ "arity": -4, "arguments": [ { - "name": "parameter_value", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "parameter", - "type": "string" + "type": "string", + "display_text": "parameter" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] } @@ -3155,7 +3455,7 @@ ] }, "COPY": { - "summary": "Copy a key", + "summary": "Copies the value of a key to a new key.", "since": "6.2.0", "group": "generic", "complexity": "O(N) worst case for collections, where N is the number of nested items. 
O(1) for string values.", @@ -3207,22 +3507,26 @@ { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 0 }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 1 }, { "name": "destination-db", "type": "integer", + "display_text": "destination-db", "token": "DB", "optional": true }, { "name": "replace", "type": "pure-token", + "display_text": "replace", "token": "REPLACE", "optional": true } @@ -3233,7 +3537,7 @@ ] }, "DBSIZE": { - "summary": "Return the number of keys in the selected database", + "summary": "Returns the number of keys in the database.", "since": "1.0.0", "group": "server", "complexity": "O(1)", @@ -3253,7 +3557,7 @@ ] }, "DEBUG": { - "summary": "A container for debugging commands", + "summary": "A container for debugging commands.", "since": "1.0.0", "group": "server", "complexity": "Depends on subcommand.", @@ -3274,7 +3578,7 @@ ] }, "DECR": { - "summary": "Decrement the integer value of a key by one", + "summary": "Decrements the integer value of a key by one. Uses 0 as initial value if the key doesn't exist.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -3309,6 +3613,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -3319,7 +3624,7 @@ ] }, "DECRBY": { - "summary": "Decrement the integer value of a key by the given number", + "summary": "Decrements a number from the integer value of a key. 
Uses 0 as initial value if the key doesn't exist.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -3354,11 +3659,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "decrement", - "type": "integer" + "type": "integer", + "display_text": "decrement" } ], "command_flags": [ @@ -3368,7 +3675,7 @@ ] }, "DEL": { - "summary": "Delete a key", + "summary": "Deletes one or more keys.", "since": "1.0.0", "group": "generic", "complexity": "O(N) where N is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is O(M) where M is the number of elements in the list, set, sorted set or hash. Removing a single key that holds a string value is O(1).", @@ -3402,6 +3709,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -3415,7 +3723,7 @@ ] }, "DISCARD": { - "summary": "Discard all commands issued after MULTI", + "summary": "Discards a transaction.", "since": "2.0.0", "group": "transactions", "complexity": "O(N), when N is the number of queued commands", @@ -3433,7 +3741,7 @@ ] }, "DUMP": { - "summary": "Return a serialized version of the value stored at the specified key.", + "summary": "Returns a serialized representation of the value stored at a key.", "since": "2.6.0", "group": "generic", "complexity": "O(1) to access the key and additional O(N*M) to serialize it, where N is the number of Redis objects composing the value and M their average size. 
For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1).", @@ -3467,6 +3775,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -3478,7 +3787,7 @@ ] }, "ECHO": { - "summary": "Echo the given string", + "summary": "Returns the given string.", "since": "1.0.0", "group": "connection", "complexity": "O(1)", @@ -3490,7 +3799,8 @@ "arguments": [ { "name": "message", - "type": "string" + "type": "string", + "display_text": "message" } ], "command_flags": [ @@ -3500,7 +3810,7 @@ ] }, "EVAL": { - "summary": "Execute a Lua script server side", + "summary": "Executes a server-side Lua script.", "since": "2.6.0", "group": "scripting", "complexity": "Depends on the script that is executed.", @@ -3534,15 +3844,18 @@ "arguments": [ { "name": "script", - "type": "string" + "type": "string", + "display_text": "script" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "optional": true, "multiple": true @@ -3550,6 +3863,7 @@ { "name": "arg", "type": "string", + "display_text": "arg", "optional": true, "multiple": true } @@ -3563,7 +3877,7 @@ ] }, "EVALSHA": { - "summary": "Execute a Lua script server side", + "summary": "Executes a server-side Lua script by SHA1 digest.", "since": "2.6.0", "group": "scripting", "complexity": "Depends on the script that is executed.", @@ -3596,15 +3910,18 @@ "arguments": [ { "name": "sha1", - "type": "string" + "type": "string", + "display_text": "sha1" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "optional": true, "multiple": true @@ -3612,6 +3929,7 @@ { "name": "arg", "type": "string", + "display_text": "arg", "optional": true, "multiple": true } @@ -3625,7 +3943,7 @@ ] }, "EVALSHA_RO": { - "summary": "Execute a read-only 
Lua script server side", + "summary": "Executes a read-only server-side Lua script by SHA1 digest.", "since": "7.0.0", "group": "scripting", "complexity": "Depends on the script that is executed.", @@ -3657,21 +3975,27 @@ "arguments": [ { "name": "sha1", - "type": "string" + "type": "string", + "display_text": "sha1" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, + "optional": true, "multiple": true }, { "name": "arg", "type": "string", + "display_text": "arg", + "optional": true, "multiple": true } ], @@ -3685,7 +4009,7 @@ ] }, "EVAL_RO": { - "summary": "Execute a read-only Lua script server side", + "summary": "Executes a read-only server-side Lua script.", "since": "7.0.0", "group": "scripting", "complexity": "Depends on the script that is executed.", @@ -3718,21 +4042,27 @@ "arguments": [ { "name": "script", - "type": "string" + "type": "string", + "display_text": "script" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, + "optional": true, "multiple": true }, { "name": "arg", "type": "string", + "display_text": "arg", + "optional": true, "multiple": true } ], @@ -3746,7 +4076,7 @@ ] }, "EXEC": { - "summary": "Execute all commands issued after MULTI", + "summary": "Executes all commands in a transaction.", "since": "1.2.0", "group": "transactions", "complexity": "Depends on commands in the transaction", @@ -3763,7 +4093,7 @@ ] }, "EXISTS": { - "summary": "Determine if a key exists", + "summary": "Determines whether one or more keys exist.", "since": "1.0.0", "group": "generic", "complexity": "O(N) where N is the number of keys to check.", @@ -3802,6 +4132,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -3816,7 +4147,7 @@ ] }, "EXPIRE": { - "summary": "Set a 
key's time to live in seconds", + "summary": "Sets the expiration time of a key in seconds.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -3856,11 +4187,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "seconds", - "type": "integer" + "type": "integer", + "display_text": "seconds" }, { "name": "condition", @@ -3871,21 +4204,25 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" }, { "name": "gt", "type": "pure-token", + "display_text": "gt", "token": "GT" }, { "name": "lt", "type": "pure-token", + "display_text": "lt", "token": "LT" } ] @@ -3897,7 +4234,7 @@ ] }, "EXPIREAT": { - "summary": "Set the expiration for a key as a UNIX timestamp", + "summary": "Sets the expiration time of a key to a Unix timestamp.", "since": "1.2.0", "group": "generic", "complexity": "O(1)", @@ -3937,11 +4274,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "unix-time-seconds", - "type": "unix-time" + "type": "unix-time", + "display_text": "unix-time-seconds" }, { "name": "condition", @@ -3952,21 +4291,25 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" }, { "name": "gt", "type": "pure-token", + "display_text": "gt", "token": "GT" }, { "name": "lt", "type": "pure-token", + "display_text": "lt", "token": "LT" } ] @@ -3978,7 +4321,7 @@ ] }, "EXPIRETIME": { - "summary": "Get the expiration Unix timestamp for a key", + "summary": "Returns the expiration time of a key as a Unix timestamp.", "since": "7.0.0", "group": "generic", "complexity": "O(1)", @@ -4012,6 +4355,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -4021,7 +4365,7 @@ ] }, "FAILOVER": { - "summary": "Start a coordinated failover between this server and one of its 
replicas.", + "summary": "Starts a coordinated failover from a server to one of its replicas.", "since": "6.2.0", "group": "server", "complexity": "O(1)", @@ -4040,15 +4384,18 @@ "arguments": [ { "name": "host", - "type": "string" + "type": "string", + "display_text": "host" }, { "name": "port", - "type": "integer" + "type": "integer", + "display_text": "port" }, { "name": "force", "type": "pure-token", + "display_text": "force", "token": "FORCE", "optional": true } @@ -4057,12 +4404,14 @@ { "name": "abort", "type": "pure-token", + "display_text": "abort", "token": "ABORT", "optional": true }, { "name": "milliseconds", "type": "integer", + "display_text": "milliseconds", "token": "TIMEOUT", "optional": true } @@ -4074,7 +4423,7 @@ ] }, "FCALL": { - "summary": "Invoke a function", + "summary": "Invokes a function.", "since": "7.0.0", "group": "scripting", "complexity": "Depends on the function that is executed.", @@ -4108,21 +4457,27 @@ "arguments": [ { "name": "function", - "type": "string" + "type": "string", + "display_text": "function" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, + "optional": true, "multiple": true }, { "name": "arg", "type": "string", + "display_text": "arg", + "optional": true, "multiple": true } ], @@ -4135,7 +4490,7 @@ ] }, "FCALL_RO": { - "summary": "Invoke a read-only function", + "summary": "Invokes a read-only function.", "since": "7.0.0", "group": "scripting", "complexity": "Depends on the function that is executed.", @@ -4168,21 +4523,27 @@ "arguments": [ { "name": "function", - "type": "string" + "type": "string", + "display_text": "function" }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, + "optional": true, "multiple": true }, { "name": "arg", "type": "string", + "display_text": 
"arg", + "optional": true, "multiple": true } ], @@ -4196,7 +4557,7 @@ ] }, "FLUSHALL": { - "summary": "Remove all keys from all databases", + "summary": "Removes all keys from all databases.", "since": "1.0.0", "group": "server", "complexity": "O(N) where N is the total number of keys in all databases", @@ -4219,19 +4580,21 @@ "arity": -1, "arguments": [ { - "name": "async", + "name": "flush-type", "type": "oneof", "optional": true, "arguments": [ { "name": "async", "type": "pure-token", + "display_text": "async", "token": "ASYNC", "since": "4.0.0" }, { "name": "sync", "type": "pure-token", + "display_text": "sync", "token": "SYNC", "since": "6.2.0" } @@ -4247,7 +4610,7 @@ ] }, "FLUSHDB": { - "summary": "Remove all keys from the current database", + "summary": "Remove all keys from the current database.", "since": "1.0.0", "group": "server", "complexity": "O(N) where N is the number of keys in the selected database", @@ -4270,19 +4633,21 @@ "arity": -1, "arguments": [ { - "name": "async", + "name": "flush-type", "type": "oneof", "optional": true, "arguments": [ { "name": "async", "type": "pure-token", + "display_text": "async", "token": "ASYNC", "since": "4.0.0" }, { "name": "sync", "type": "pure-token", + "display_text": "sync", "token": "SYNC", "since": "6.2.0" } @@ -4298,7 +4663,7 @@ ] }, "FUNCTION": { - "summary": "A container for function commands", + "summary": "A container for function commands.", "since": "7.0.0", "group": "scripting", "complexity": "Depends on subcommand.", @@ -4308,7 +4673,7 @@ "arity": -2 }, "FUNCTION DELETE": { - "summary": "Delete a function by name", + "summary": "Deletes a library and its functions.", "since": "7.0.0", "group": "scripting", "complexity": "O(1)", @@ -4321,7 +4686,8 @@ "arguments": [ { "name": "library-name", - "type": "string" + "type": "string", + "display_text": "library-name" } ], "command_flags": [ @@ -4334,7 +4700,7 @@ ] }, "FUNCTION DUMP": { - "summary": "Dump all functions into a serialized binary payload", + 
"summary": "Dumps all libraries into a serialized binary payload.", "since": "7.0.0", "group": "scripting", "complexity": "O(N) where N is the number of functions", @@ -4348,7 +4714,7 @@ ] }, "FUNCTION FLUSH": { - "summary": "Deleting all functions", + "summary": "Deletes all libraries and functions.", "since": "7.0.0", "group": "scripting", "complexity": "O(N) where N is the number of functions deleted", @@ -4360,18 +4726,20 @@ "arity": -2, "arguments": [ { - "name": "async", + "name": "flush-type", "type": "oneof", "optional": true, "arguments": [ { "name": "async", "type": "pure-token", + "display_text": "async", "token": "ASYNC" }, { "name": "sync", "type": "pure-token", + "display_text": "sync", "token": "SYNC" } ] @@ -4387,7 +4755,7 @@ ] }, "FUNCTION HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "7.0.0", "group": "scripting", "complexity": "O(1)", @@ -4402,7 +4770,7 @@ ] }, "FUNCTION KILL": { - "summary": "Kill the function currently in execution.", + "summary": "Terminates a function during execution.", "since": "7.0.0", "group": "scripting", "complexity": "O(1)", @@ -4421,7 +4789,7 @@ ] }, "FUNCTION LIST": { - "summary": "List information about all the functions", + "summary": "Returns information about all libraries.", "since": "7.0.0", "group": "scripting", "complexity": "O(N) where N is the number of functions", @@ -4434,12 +4802,14 @@ { "name": "library-name-pattern", "type": "string", + "display_text": "library-name-pattern", "token": "LIBRARYNAME", "optional": true }, { "name": "withcode", "type": "pure-token", + "display_text": "withcode", "token": "WITHCODE", "optional": true } @@ -4452,7 +4822,7 @@ ] }, "FUNCTION LOAD": { - "summary": "Create a function with the given arguments (name, code, description)", + "summary": "Creates a library.", "since": "7.0.0", "group": "scripting", "complexity": "O(1) (considering compilation time is 
redundant)", @@ -4466,12 +4836,14 @@ { "name": "replace", "type": "pure-token", + "display_text": "replace", "token": "REPLACE", "optional": true }, { "name": "function-code", - "type": "string" + "type": "string", + "display_text": "function-code" } ], "command_flags": [ @@ -4485,7 +4857,7 @@ ] }, "FUNCTION RESTORE": { - "summary": "Restore all the functions on the given payload", + "summary": "Restores all libraries from a payload.", "since": "7.0.0", "group": "scripting", "complexity": "O(N) where N is the number of functions on the payload", @@ -4498,7 +4870,8 @@ "arguments": [ { "name": "serialized-value", - "type": "string" + "type": "string", + "display_text": "serialized-value" }, { "name": "policy", @@ -4508,16 +4881,19 @@ { "name": "flush", "type": "pure-token", + "display_text": "flush", "token": "FLUSH" }, { "name": "append", "type": "pure-token", + "display_text": "append", "token": "APPEND" }, { "name": "replace", "type": "pure-token", + "display_text": "replace", "token": "REPLACE" } ] @@ -4534,7 +4910,7 @@ ] }, "FUNCTION STATS": { - "summary": "Return information about the function currently running (name, description, duration)", + "summary": "Returns information about a function during execution.", "since": "7.0.0", "group": "scripting", "complexity": "O(1)", @@ -4554,7 +4930,7 @@ ] }, "GEOADD": { - "summary": "Add one or more geospatial items in the geospatial index represented using a sorted set", + "summary": "Adds one or more members to a geospatial index. 
The key is created if it doesn't exist.", "since": "3.2.0", "group": "geo", "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", @@ -4594,6 +4970,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -4605,11 +4982,13 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" } ] @@ -4617,26 +4996,30 @@ { "name": "change", "type": "pure-token", + "display_text": "change", "token": "CH", "since": "6.2.0", "optional": true }, { - "name": "longitude_latitude_member", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "longitude", - "type": "double" + "type": "double", + "display_text": "longitude" }, { "name": "latitude", - "type": "double" + "type": "double", + "display_text": "latitude" }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" } ] } @@ -4647,10 +5030,10 @@ ] }, "GEODIST": { - "summary": "Returns the distance between two members of a geospatial index", + "summary": "Returns the distance between two members of a geospatial index.", "since": "3.2.0", "group": "geo", - "complexity": "O(log(N))", + "complexity": "O(1)", "acl_categories": [ "@read", "@geo", @@ -4681,15 +5064,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member1", - "type": "string" + "type": "string", + "display_text": "member1" }, { "name": "member2", - "type": "string" + "type": "string", + "display_text": "member2" }, { "name": "unit", @@ -4699,21 +5085,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -4724,10 +5114,10 @@ ] }, 
"GEOHASH": { - "summary": "Returns members of a geospatial index as standard geohash strings", + "summary": "Returns members from a geospatial index as geohash strings.", "since": "3.2.0", "group": "geo", - "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", + "complexity": "O(1) for each member requested.", "acl_categories": [ "@read", "@geo", @@ -4758,11 +5148,14 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", + "optional": true, "multiple": true } ], @@ -4771,10 +5164,10 @@ ] }, "GEOPOS": { - "summary": "Returns longitude and latitude of members of a geospatial index", + "summary": "Returns the longitude and latitude of members from a geospatial index.", "since": "3.2.0", "group": "geo", - "complexity": "O(N) where N is the number of members requested.", + "complexity": "O(1) for each member requested.", "acl_categories": [ "@read", "@geo", @@ -4805,11 +5198,14 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", + "optional": true, "multiple": true } ], @@ -4818,7 +5214,7 @@ ] }, "GEORADIUS": { - "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", + "summary": "Queries a geospatial index for members within a distance from a coordinate, optionally stores the result.", "since": "3.2.0", "group": "geo", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", @@ -4828,6 +5224,10 @@ [ "6.2.0", "Added the `ANY` option for `COUNT`." + ], + [ + "7.0.0", + "Added support for uppercase unit names." 
] ], "acl_categories": [ @@ -4898,19 +5298,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "longitude", - "type": "double" + "type": "double", + "display_text": "longitude" }, { "name": "latitude", - "type": "double" + "type": "double", + "display_text": "latitude" }, { "name": "radius", - "type": "double" + "type": "double", + "display_text": "radius" }, { "name": "unit", @@ -4919,21 +5323,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -4941,34 +5349,39 @@ { "name": "withcoord", "type": "pure-token", + "display_text": "withcoord", "token": "WITHCOORD", "optional": true }, { "name": "withdist", "type": "pure-token", + "display_text": "withdist", "token": "WITHDIST", "optional": true }, { "name": "withhash", "type": "pure-token", + "display_text": "withhash", "token": "WITHHASH", "optional": true }, { - "name": "count", + "name": "count-block", "type": "block", "optional": true, "arguments": [ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT" }, { "name": "any", "type": "pure-token", + "display_text": "any", "token": "ANY", "since": "6.2.0", "optional": true @@ -4983,28 +5396,37 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] }, { - "name": "key", - "type": "key", - "key_spec_index": 1, - "token": "STORE", - "optional": true - }, - { - "name": "key", - "type": "key", - "key_spec_index": 2, - "token": "STOREDIST", - "optional": true + "name": "store", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "storekey", + "type": "key", + "display_text": "key", + "key_spec_index": 1, + 
"token": "STORE" + }, + { + "name": "storedistkey", + "type": "key", + "display_text": "key", + "key_spec_index": 2, + "token": "STOREDIST" + } + ] } ], "command_flags": [ @@ -5017,12 +5439,18 @@ ] }, "GEORADIUSBYMEMBER": { - "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member", + "summary": "Queries a geospatial index for members within a distance from a member, optionally stores the result.", "since": "3.2.0", "group": "geo", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", "deprecated_since": "6.2.0", "replaced_by": "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments", + "history": [ + [ + "7.0.0", + "Added support for uppercase unit names." + ] + ], "acl_categories": [ "@write", "@geo", @@ -5091,15 +5519,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" }, { "name": "radius", - "type": "double" + "type": "double", + "display_text": "radius" }, { "name": "unit", @@ -5108,21 +5539,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5130,34 +5565,39 @@ { "name": "withcoord", "type": "pure-token", + "display_text": "withcoord", "token": "WITHCOORD", "optional": true }, { "name": "withdist", "type": "pure-token", + "display_text": "withdist", "token": "WITHDIST", "optional": true }, { "name": "withhash", "type": "pure-token", + "display_text": "withhash", "token": "WITHHASH", "optional": true }, { - "name": "count", + "name": "count-block", 
"type": "block", "optional": true, "arguments": [ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT" }, { "name": "any", "type": "pure-token", + "display_text": "any", "token": "ANY", "optional": true } @@ -5171,28 +5611,37 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] }, { - "name": "key", - "type": "key", - "key_spec_index": 1, - "token": "STORE", - "optional": true - }, - { - "name": "key", - "type": "key", - "key_spec_index": 2, - "token": "STOREDIST", - "optional": true + "name": "store", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "storekey", + "type": "key", + "display_text": "key", + "key_spec_index": 1, + "token": "STORE" + }, + { + "name": "storedistkey", + "type": "key", + "display_text": "key", + "key_spec_index": 2, + "token": "STOREDIST" + } + ] } ], "command_flags": [ @@ -5205,7 +5654,7 @@ ] }, "GEORADIUSBYMEMBER_RO": { - "summary": "A read-only variant for GEORADIUSBYMEMBER", + "summary": "Returns members from a geospatial index that are within a distance from a member.", "since": "3.2.10", "group": "geo", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", @@ -5241,15 +5690,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" }, { "name": "radius", - "type": "double" + "type": "double", + "display_text": "radius" }, { "name": "unit", @@ -5258,21 +5710,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": 
"pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5280,34 +5736,39 @@ { "name": "withcoord", "type": "pure-token", + "display_text": "withcoord", "token": "WITHCOORD", "optional": true }, { "name": "withdist", "type": "pure-token", + "display_text": "withdist", "token": "WITHDIST", "optional": true }, { "name": "withhash", "type": "pure-token", + "display_text": "withhash", "token": "WITHHASH", "optional": true }, { - "name": "count", + "name": "count-block", "type": "block", "optional": true, "arguments": [ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT" }, { "name": "any", "type": "pure-token", + "display_text": "any", "token": "ANY", "optional": true } @@ -5321,11 +5782,13 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] @@ -5339,7 +5802,7 @@ ] }, "GEORADIUS_RO": { - "summary": "A read-only variant for GEORADIUS", + "summary": "Returns members from a geospatial index that are within a distance from a coordinate.", "since": "3.2.10", "group": "geo", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", @@ -5381,19 +5844,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "longitude", - "type": "double" + "type": "double", + "display_text": "longitude" }, { "name": "latitude", - "type": "double" + "type": "double", + "display_text": "latitude" }, { "name": "radius", - "type": "double" + "type": "double", + "display_text": "radius" }, { "name": "unit", @@ -5402,21 +5869,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": 
"pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5424,34 +5895,39 @@ { "name": "withcoord", "type": "pure-token", + "display_text": "withcoord", "token": "WITHCOORD", "optional": true }, { "name": "withdist", "type": "pure-token", + "display_text": "withdist", "token": "WITHDIST", "optional": true }, { "name": "withhash", "type": "pure-token", + "display_text": "withhash", "token": "WITHHASH", "optional": true }, { - "name": "count", + "name": "count-block", "type": "block", "optional": true, "arguments": [ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT" }, { "name": "any", "type": "pure-token", + "display_text": "any", "token": "ANY", "since": "6.2.0", "optional": true @@ -5466,11 +5942,13 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] @@ -5484,10 +5962,16 @@ ] }, "GEOSEARCH": { - "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle.", + "summary": "Queries a geospatial index for members inside an area of a box or a circle.", "since": "6.2.0", "group": "geo", "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "history": [ + [ + "7.0.0", + "Added support for uppercase unit names." 
+ ] + ], "acl_categories": [ "@read", "@geo", @@ -5518,6 +6002,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -5527,20 +6012,23 @@ { "name": "member", "type": "string", + "display_text": "member", "token": "FROMMEMBER" }, { - "name": "longitude_latitude", + "name": "fromlonlat", "type": "block", "token": "FROMLONLAT", "arguments": [ { "name": "longitude", - "type": "double" + "type": "double", + "display_text": "longitude" }, { "name": "latitude", - "type": "double" + "type": "double", + "display_text": "latitude" } ] } @@ -5557,6 +6045,7 @@ { "name": "radius", "type": "double", + "display_text": "radius", "token": "BYRADIUS" }, { @@ -5566,21 +6055,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5594,11 +6087,13 @@ { "name": "width", "type": "double", + "display_text": "width", "token": "BYBOX" }, { "name": "height", - "type": "double" + "type": "double", + "display_text": "height" }, { "name": "unit", @@ -5607,21 +6102,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5638,28 +6137,32 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] }, { - "name": "count", + "name": "count-block", "type": "block", "optional": true, "arguments": [ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT" }, { "name": "any", "type": "pure-token", + 
"display_text": "any", "token": "ANY", "optional": true } @@ -5668,18 +6171,21 @@ { "name": "withcoord", "type": "pure-token", + "display_text": "withcoord", "token": "WITHCOORD", "optional": true }, { "name": "withdist", "type": "pure-token", + "display_text": "withdist", "token": "WITHDIST", "optional": true }, { "name": "withhash", "type": "pure-token", + "display_text": "withhash", "token": "WITHHASH", "optional": true } @@ -5689,10 +6195,16 @@ ] }, "GEOSEARCHSTORE": { - "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle, and store the result in another key.", + "summary": "Queries a geospatial index for members inside an area of a box or a circle, optionally stores the result.", "since": "6.2.0", "group": "geo", "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "history": [ + [ + "7.0.0", + "Added support for uppercase unit names." 
+ ] + ], "acl_categories": [ "@write", "@geo", @@ -5741,11 +6253,13 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 1 }, { @@ -5755,20 +6269,23 @@ { "name": "member", "type": "string", + "display_text": "member", "token": "FROMMEMBER" }, { - "name": "longitude_latitude", + "name": "fromlonlat", "type": "block", "token": "FROMLONLAT", "arguments": [ { "name": "longitude", - "type": "double" + "type": "double", + "display_text": "longitude" }, { "name": "latitude", - "type": "double" + "type": "double", + "display_text": "latitude" } ] } @@ -5785,6 +6302,7 @@ { "name": "radius", "type": "double", + "display_text": "radius", "token": "BYRADIUS" }, { @@ -5794,21 +6312,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5822,11 +6344,13 @@ { "name": "width", "type": "double", + "display_text": "width", "token": "BYBOX" }, { "name": "height", - "type": "double" + "type": "double", + "display_text": "height" }, { "name": "unit", @@ -5835,21 +6359,25 @@ { "name": "m", "type": "pure-token", + "display_text": "m", "token": "M" }, { "name": "km", "type": "pure-token", + "display_text": "km", "token": "KM" }, { "name": "ft", "type": "pure-token", + "display_text": "ft", "token": "FT" }, { "name": "mi", "type": "pure-token", + "display_text": "mi", "token": "MI" } ] @@ -5866,28 +6394,32 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] }, { - "name": "count", + "name": "count-block", "type": "block", "optional": true, "arguments": [ { "name": "count", "type": 
"integer", + "display_text": "count", "token": "COUNT" }, { "name": "any", "type": "pure-token", + "display_text": "any", "token": "ANY", "optional": true } @@ -5896,6 +6428,7 @@ { "name": "storedist", "type": "pure-token", + "display_text": "storedist", "token": "STOREDIST", "optional": true } @@ -5906,7 +6439,7 @@ ] }, "GET": { - "summary": "Get the value of a key", + "summary": "Returns the string value of a key.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -5940,6 +6473,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -5949,7 +6483,7 @@ ] }, "GETBIT": { - "summary": "Returns the bit value at offset in the string value stored at key", + "summary": "Returns a bit value by offset.", "since": "2.2.0", "group": "bitmap", "complexity": "O(1)", @@ -5983,11 +6517,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" } ], "command_flags": [ @@ -5996,7 +6532,7 @@ ] }, "GETDEL": { - "summary": "Get the value of a key and delete the key", + "summary": "Returns the string value of a key after deleting the key.", "since": "6.2.0", "group": "string", "complexity": "O(1)", @@ -6031,6 +6567,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -6040,7 +6577,7 @@ ] }, "GETEX": { - "summary": "Get the value of a key and optionally set its expiration", + "summary": "Returns the string value of a key after setting its expiration time.", "since": "6.2.0", "group": "string", "complexity": "O(1)", @@ -6076,6 +6613,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -6086,26 +6624,31 @@ { "name": "seconds", "type": "integer", + "display_text": "seconds", "token": "EX" }, { "name": "milliseconds", "type": "integer", + "display_text": "milliseconds", "token": "PX" }, { "name": "unix-time-seconds", "type": "unix-time", + "display_text": 
"unix-time-seconds", "token": "EXAT" }, { "name": "unix-time-milliseconds", "type": "unix-time", + "display_text": "unix-time-milliseconds", "token": "PXAT" }, { "name": "persist", "type": "pure-token", + "display_text": "persist", "token": "PERSIST" } ] @@ -6117,7 +6660,7 @@ ] }, "GETRANGE": { - "summary": "Get a substring of the string stored at a key", + "summary": "Returns a substring of the string stored at a key.", "since": "2.4.0", "group": "string", "complexity": "O(N) where N is the length of the returned string. The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", @@ -6151,15 +6694,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "end", - "type": "integer" + "type": "integer", + "display_text": "end" } ], "command_flags": [ @@ -6167,7 +6713,7 @@ ] }, "GETSET": { - "summary": "Set the string value of a key and return its old value", + "summary": "Returns the previous string value of a key after setting it to a new value.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -6204,11 +6750,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ @@ -6221,7 +6769,7 @@ ] }, "HDEL": { - "summary": "Delete one or more hash fields", + "summary": "Deletes one or more fields and their values from a hash. 
Deletes the hash if no fields remain.", "since": "2.0.0", "group": "hash", "complexity": "O(N) where N is the number of fields to be removed.", @@ -6261,11 +6809,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", "type": "string", + "display_text": "field", "multiple": true } ], @@ -6275,7 +6825,7 @@ ] }, "HELLO": { - "summary": "Handshake with Redis", + "summary": "Handshakes with the Redis server.", "since": "6.0.0", "group": "connection", "complexity": "O(1)", @@ -6298,27 +6848,31 @@ "arguments": [ { "name": "protover", - "type": "integer" + "type": "integer", + "display_text": "protover" }, { - "name": "username_password", + "name": "auth", "type": "block", "token": "AUTH", "optional": true, "arguments": [ { "name": "username", - "type": "string" + "type": "string", + "display_text": "username" }, { "name": "password", - "type": "string" + "type": "string", + "display_text": "password" } ] }, { "name": "clientname", "type": "string", + "display_text": "clientname", "token": "SETNAME", "optional": true } @@ -6335,7 +6889,7 @@ ] }, "HEXISTS": { - "summary": "Determine if a hash field exists", + "summary": "Determines whether a field exists in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(1)", @@ -6368,11 +6922,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" } ], "command_flags": [ @@ -6381,7 +6937,7 @@ ] }, "HGET": { - "summary": "Get the value of a hash field", + "summary": "Returns the value of a field in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(1)", @@ -6415,11 +6971,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" } ], "command_flags": [ @@ -6428,7 +6986,7 @@ ] }, "HGETALL": { - "summary": "Get all the fields and values in a hash", 
+ "summary": "Returns all fields and values in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(N) where N is the size of the hash.", @@ -6462,6 +7020,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -6473,7 +7032,7 @@ ] }, "HINCRBY": { - "summary": "Increment the integer value of a hash field by the given number", + "summary": "Increments the integer value of a field in a hash by a number. Uses 0 as initial value if the field doesn't exist.", "since": "2.0.0", "group": "hash", "complexity": "O(1)", @@ -6508,15 +7067,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" }, { "name": "increment", - "type": "integer" + "type": "integer", + "display_text": "increment" } ], "command_flags": [ @@ -6526,7 +7088,7 @@ ] }, "HINCRBYFLOAT": { - "summary": "Increment the float value of a hash field by the given amount", + "summary": "Increments the floating point value of a field by a number. 
Uses 0 as initial value if the field doesn't exist.", "since": "2.6.0", "group": "hash", "complexity": "O(1)", @@ -6561,15 +7123,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" }, { "name": "increment", - "type": "double" + "type": "double", + "display_text": "increment" } ], "command_flags": [ @@ -6579,7 +7144,7 @@ ] }, "HKEYS": { - "summary": "Get all the fields in a hash", + "summary": "Returns all fields in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(N) where N is the size of the hash.", @@ -6613,6 +7178,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -6624,7 +7190,7 @@ ] }, "HLEN": { - "summary": "Get the number of fields in a hash", + "summary": "Returns the number of fields in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(1)", @@ -6657,6 +7223,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -6666,7 +7233,7 @@ ] }, "HMGET": { - "summary": "Get the values of all the given hash fields", + "summary": "Returns the values of all fields in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(N) where N is the number of fields being requested.", @@ -6700,11 +7267,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", "type": "string", + "display_text": "field", "multiple": true } ], @@ -6714,7 +7283,7 @@ ] }, "HMSET": { - "summary": "Set multiple hash fields to multiple values", + "summary": "Sets the values of multiple fields.", "since": "2.0.0", "group": "hash", "complexity": "O(N) where N is the number of fields being set.", @@ -6750,20 +7319,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "field_value", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "field", - "type": "string" + "type": 
"string", + "display_text": "field" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] } @@ -6778,7 +7350,7 @@ ] }, "HRANDFIELD": { - "summary": "Get one or multiple random fields from a hash", + "summary": "Returns one or more random fields from a hash.", "since": "6.2.0", "group": "hash", "complexity": "O(N) where N is the number of fields returned", @@ -6812,6 +7384,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -6821,11 +7394,13 @@ "arguments": [ { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" }, { "name": "withvalues", "type": "pure-token", + "display_text": "withvalues", "token": "WITHVALUES", "optional": true } @@ -6840,10 +7415,10 @@ ] }, "HSCAN": { - "summary": "Incrementally iterate hash fields and associated values", + "summary": "Iterates over fields and values of a hash.", "since": "2.8.0", "group": "hash", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection.", "acl_categories": [ "@read", "@hash", @@ -6874,21 +7449,25 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "cursor", - "type": "integer" + "type": "integer", + "display_text": "cursor" }, { "name": "pattern", "type": "pattern", + "display_text": "pattern", "token": "MATCH", "optional": true }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -6901,7 +7480,7 @@ ] }, "HSET": { - "summary": "Set the string value of a hash field", + "summary": "Creates or modifies the value of a field in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs.", @@ -6941,20 +7520,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "field_value", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] } @@ -6966,7 +7548,7 @@ ] }, "HSETNX": { - "summary": "Set the value of a hash field, only if the field does not exist", + "summary": "Sets the value of a field in a hash only when the field doesn't exist.", "since": "2.0.0", "group": "hash", "complexity": "O(1)", @@ -7000,15 +7582,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ @@ -7018,7 +7603,7 @@ ] }, "HSTRLEN": { - "summary": "Get the length of the value of a hash field", + "summary": "Returns the length of the value of a field.", "since": "3.2.0", "group": "hash", "complexity": "O(1)", @@ 
-7051,11 +7636,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" } ], "command_flags": [ @@ -7064,7 +7651,7 @@ ] }, "HVALS": { - "summary": "Get all the values in a hash", + "summary": "Returns all values in a hash.", "since": "2.0.0", "group": "hash", "complexity": "O(N) where N is the size of the hash.", @@ -7098,6 +7685,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -7109,7 +7697,7 @@ ] }, "INCR": { - "summary": "Increment the integer value of a key by one", + "summary": "Increments the integer value of a key by one. Uses 0 as initial value if the key doesn't exist.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -7144,6 +7732,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -7154,7 +7743,7 @@ ] }, "INCRBY": { - "summary": "Increment the integer value of a key by the given amount", + "summary": "Increments the integer value of a key by a number. Uses 0 as initial value if the key doesn't exist.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -7189,11 +7778,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "increment", - "type": "integer" + "type": "integer", + "display_text": "increment" } ], "command_flags": [ @@ -7203,7 +7794,7 @@ ] }, "INCRBYFLOAT": { - "summary": "Increment the float value of a key by the given amount", + "summary": "Increment the floating point value of a key by a number. 
Uses 0 as initial value if the key doesn't exist.", "since": "2.6.0", "group": "string", "complexity": "O(1)", @@ -7238,11 +7829,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "increment", - "type": "double" + "type": "double", + "display_text": "increment" } ], "command_flags": [ @@ -7252,7 +7845,7 @@ ] }, "INFO": { - "summary": "Get information and statistics about the server", + "summary": "Returns information and statistics about the server.", "since": "1.0.0", "group": "server", "complexity": "O(1)", @@ -7271,6 +7864,7 @@ { "name": "section", "type": "string", + "display_text": "section", "optional": true, "multiple": true } @@ -7286,7 +7880,7 @@ ] }, "KEYS": { - "summary": "Find all keys matching the given pattern", + "summary": "Returns all key names that match a pattern.", "since": "1.0.0", "group": "generic", "complexity": "O(N) with N being the number of keys in the database, under the assumption that the key names in the database and the given pattern have limited length.", @@ -7300,7 +7894,8 @@ "arguments": [ { "name": "pattern", - "type": "pattern" + "type": "pattern", + "display_text": "pattern" } ], "command_flags": [ @@ -7312,7 +7907,7 @@ ] }, "LASTSAVE": { - "summary": "Get the UNIX time stamp of the last successful save to disk", + "summary": "Returns the Unix timestamp of the last successful save to disk.", "since": "1.0.0", "group": "server", "complexity": "O(1)", @@ -7332,7 +7927,7 @@ ] }, "LATENCY": { - "summary": "A container for latency diagnostics commands", + "summary": "A container for latency diagnostics commands.", "since": "2.8.13", "group": "server", "complexity": "Depends on subcommand.", @@ -7342,7 +7937,7 @@ "arity": -2 }, "LATENCY DOCTOR": { - "summary": "Return a human readable latency analysis report.", + "summary": "Returns a human-readable latency analysis report.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -7365,7 +7960,7 @@ ] }, "LATENCY GRAPH": { - 
"summary": "Return a latency graph for the event.", + "summary": "Returns a latency graph for an event.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -7378,7 +7973,8 @@ "arguments": [ { "name": "event", - "type": "string" + "type": "string", + "display_text": "event" } ], "command_flags": [ @@ -7394,7 +7990,7 @@ ] }, "LATENCY HELP": { - "summary": "Show helpful text about the different subcommands.", + "summary": "Returns helpful text about the different subcommands.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -7408,7 +8004,7 @@ ] }, "LATENCY HISTOGRAM": { - "summary": "Return the cumulative distribution of latencies of a subset of commands or all.", + "summary": "Returns the cumulative distribution of latencies of a subset or all commands.", "since": "7.0.0", "group": "server", "complexity": "O(N) where N is the number of commands with latency information being retrieved.", @@ -7422,6 +8018,7 @@ { "name": "command", "type": "string", + "display_text": "command", "optional": true, "multiple": true } @@ -7439,7 +8036,7 @@ ] }, "LATENCY HISTORY": { - "summary": "Return timestamp-latency samples for the event.", + "summary": "Returns timestamp-latency samples for an event.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -7452,7 +8049,8 @@ "arguments": [ { "name": "event", - "type": "string" + "type": "string", + "display_text": "event" } ], "command_flags": [ @@ -7468,7 +8066,7 @@ ] }, "LATENCY LATEST": { - "summary": "Return the latest latency samples for all events.", + "summary": "Returns the latest latency samples for all events.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -7491,7 +8089,7 @@ ] }, "LATENCY RESET": { - "summary": "Reset latency data for one or more events.", + "summary": "Resets the latency data for one or more events.", "since": "2.8.13", "group": "server", "complexity": "O(1)", @@ -7505,6 +8103,7 @@ { "name": "event", "type": "string", + "display_text": "event", 
"optional": true, "multiple": true } @@ -7517,11 +8116,11 @@ ], "hints": [ "request_policy:all_nodes", - "response_policy:all_succeeded" + "response_policy:agg_sum" ] }, "LCS": { - "summary": "Find longest common substring", + "summary": "Finds the longest common substring.", "since": "7.0.0", "group": "string", "complexity": "O(N*M) where N and M are the lengths of s1 and s2, respectively", @@ -7555,34 +8154,40 @@ { "name": "key1", "type": "key", + "display_text": "key1", "key_spec_index": 0 }, { "name": "key2", "type": "key", + "display_text": "key2", "key_spec_index": 0 }, { "name": "len", "type": "pure-token", + "display_text": "len", "token": "LEN", "optional": true }, { "name": "idx", "type": "pure-token", + "display_text": "idx", "token": "IDX", "optional": true }, { - "name": "len", + "name": "min-match-len", "type": "integer", + "display_text": "min-match-len", "token": "MINMATCHLEN", "optional": true }, { "name": "withmatchlen", "type": "pure-token", + "display_text": "withmatchlen", "token": "WITHMATCHLEN", "optional": true } @@ -7592,7 +8197,7 @@ ] }, "LINDEX": { - "summary": "Get an element from a list by its index", + "summary": "Returns an element from a list by its index.", "since": "1.0.0", "group": "list", "complexity": "O(N) where N is the number of elements to traverse to get to the element at index. This makes asking for the first or the last element of the list O(1).", @@ -7626,11 +8231,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "index", - "type": "integer" + "type": "integer", + "display_text": "index" } ], "command_flags": [ @@ -7638,7 +8245,7 @@ ] }, "LINSERT": { - "summary": "Insert an element before or after another element in a list", + "summary": "Inserts an element before or after another element in a list.", "since": "2.2.0", "group": "list", "complexity": "O(N) where N is the number of elements to traverse before seeing the value pivot. 
This means that inserting somewhere on the left end on the list (head) can be considered O(1) and inserting somewhere on the right end (tail) is O(N).", @@ -7672,6 +8279,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -7681,22 +8289,26 @@ { "name": "before", "type": "pure-token", + "display_text": "before", "token": "BEFORE" }, { "name": "after", "type": "pure-token", + "display_text": "after", "token": "AFTER" } ] }, { "name": "pivot", - "type": "string" + "type": "string", + "display_text": "pivot" }, { "name": "element", - "type": "string" + "type": "string", + "display_text": "element" } ], "command_flags": [ @@ -7705,7 +8317,7 @@ ] }, "LLEN": { - "summary": "Get the length of a list", + "summary": "Returns the length of a list.", "since": "1.0.0", "group": "list", "complexity": "O(1)", @@ -7738,6 +8350,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -7747,7 +8360,7 @@ ] }, "LMOVE": { - "summary": "Pop an element from a list, push it to another list and return it", + "summary": "Returns an element after popping it from one list and pushing it to another. 
Deletes the list if the last element was moved.", "since": "6.2.0", "group": "list", "complexity": "O(1)", @@ -7800,11 +8413,13 @@ { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 0 }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 1 }, { @@ -7814,11 +8429,13 @@ { "name": "left", "type": "pure-token", + "display_text": "left", "token": "LEFT" }, { "name": "right", "type": "pure-token", + "display_text": "right", "token": "RIGHT" } ] @@ -7830,11 +8447,13 @@ { "name": "left", "type": "pure-token", + "display_text": "left", "token": "LEFT" }, { "name": "right", "type": "pure-token", + "display_text": "right", "token": "RIGHT" } ] @@ -7846,7 +8465,7 @@ ] }, "LMPOP": { - "summary": "Pop elements from a list", + "summary": "Returns multiple elements from a list after removing them. Deletes the list if the last element was popped.", "since": "7.0.0", "group": "list", "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", @@ -7880,11 +8499,13 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, @@ -7895,11 +8516,13 @@ { "name": "left", "type": "pure-token", + "display_text": "left", "token": "LEFT" }, { "name": "right", "type": "pure-token", + "display_text": "right", "token": "RIGHT" } ] @@ -7907,6 +8530,7 @@ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -7917,7 +8541,7 @@ ] }, "LOLWUT": { - "summary": "Display some computer art and the Redis version", + "summary": "Displays computer art and the Redis version", "since": "5.0.0", "group": "server", "acl_categories": [ @@ -7929,6 +8553,7 @@ { "name": "version", "type": "integer", + "display_text": "version", "token": "VERSION", "optional": true } @@ -7939,7 +8564,7 @@ ] }, "LPOP": { - 
"summary": "Remove and get the first elements in a list", + "summary": "Returns the first elements in a list after removing it. Deletes the list if the last element was popped.", "since": "1.0.0", "group": "list", "complexity": "O(N) where N is the number of elements returned", @@ -7980,11 +8605,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "since": "6.2.0", "optional": true } @@ -7995,7 +8622,7 @@ ] }, "LPOS": { - "summary": "Return the index of matching elements on a list", + "summary": "Returns the index of matching elements in a list.", "since": "6.0.6", "group": "list", "complexity": "O(N) where N is the number of elements in the list, for the average case. When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time.", @@ -8029,27 +8656,32 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "element", - "type": "string" + "type": "string", + "display_text": "element" }, { "name": "rank", "type": "integer", + "display_text": "rank", "token": "RANK", "optional": true }, { "name": "num-matches", "type": "integer", + "display_text": "num-matches", "token": "COUNT", "optional": true }, { "name": "len", "type": "integer", + "display_text": "len", "token": "MAXLEN", "optional": true } @@ -8059,7 +8691,7 @@ ] }, "LPUSH": { - "summary": "Prepend one or multiple elements to a list", + "summary": "Prepends one or more elements to a list. 
Creates the key if it doesn't exist.", "since": "1.0.0", "group": "list", "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", @@ -8099,11 +8731,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "element", "type": "string", + "display_text": "element", "multiple": true } ], @@ -8114,7 +8748,7 @@ ] }, "LPUSHX": { - "summary": "Prepend an element to a list, only if the list exists", + "summary": "Prepends one or more elements to a list only when the list exists.", "since": "2.2.0", "group": "list", "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", @@ -8154,11 +8788,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "element", "type": "string", + "display_text": "element", "multiple": true } ], @@ -8169,7 +8805,7 @@ ] }, "LRANGE": { - "summary": "Get a range of elements from a list", + "summary": "Returns a range of elements from a list.", "since": "1.0.0", "group": "list", "complexity": "O(S+N) where S is the distance of start offset from HEAD for small lists, from nearest end (HEAD or TAIL) for large lists; and N is the number of elements in the specified range.", @@ -8203,15 +8839,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "stop", - "type": "integer" + "type": "integer", + "display_text": "stop" } ], "command_flags": [ @@ -8219,7 +8858,7 @@ ] }, "LREM": { - "summary": "Remove elements from a list", + "summary": "Removes elements from a list. 
Deletes the list if the last element was removed.", "since": "1.0.0", "group": "list", "complexity": "O(N+M) where N is the length of the list and M is the number of elements removed.", @@ -8253,15 +8892,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" }, { "name": "element", - "type": "string" + "type": "string", + "display_text": "element" } ], "command_flags": [ @@ -8269,7 +8911,7 @@ ] }, "LSET": { - "summary": "Set the value of an element in a list by its index", + "summary": "Sets the value of an element in a list by its index.", "since": "1.0.0", "group": "list", "complexity": "O(N) where N is the length of the list. Setting either the first or the last element of the list is O(1).", @@ -8303,15 +8945,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "index", - "type": "integer" + "type": "integer", + "display_text": "index" }, { "name": "element", - "type": "string" + "type": "string", + "display_text": "element" } ], "command_flags": [ @@ -8320,7 +8965,7 @@ ] }, "LTRIM": { - "summary": "Trim a list to the specified range", + "summary": "Removes elements from both ends a list. 
Deletes the list if all elements were trimmed.", "since": "1.0.0", "group": "list", "complexity": "O(N) where N is the number of elements to be removed by the operation.", @@ -8354,15 +8999,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "stop", - "type": "integer" + "type": "integer", + "display_text": "stop" } ], "command_flags": [ @@ -8370,7 +9018,7 @@ ] }, "MEMORY": { - "summary": "A container for memory diagnostics commands", + "summary": "A container for memory diagnostics commands.", "since": "4.0.0", "group": "server", "complexity": "Depends on subcommand.", @@ -8380,7 +9028,7 @@ "arity": -2 }, "MEMORY DOCTOR": { - "summary": "Outputs memory problems report", + "summary": "Outputs a memory problems report.", "since": "4.0.0", "group": "server", "complexity": "O(1)", @@ -8395,7 +9043,7 @@ ] }, "MEMORY HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "4.0.0", "group": "server", "complexity": "O(1)", @@ -8409,7 +9057,7 @@ ] }, "MEMORY MALLOC-STATS": { - "summary": "Show allocator internal stats", + "summary": "Returns the allocator statistics.", "since": "4.0.0", "group": "server", "complexity": "Depends on how much memory is allocated, could be slow", @@ -8424,7 +9072,7 @@ ] }, "MEMORY PURGE": { - "summary": "Ask the allocator to release memory", + "summary": "Asks the allocator to release memory.", "since": "4.0.0", "group": "server", "complexity": "Depends on how much memory is allocated, could be slow", @@ -8438,7 +9086,7 @@ ] }, "MEMORY STATS": { - "summary": "Show memory usage details", + "summary": "Returns details about memory usage.", "since": "4.0.0", "group": "server", "complexity": "O(1)", @@ -8453,7 +9101,7 @@ ] }, "MEMORY USAGE": { - "summary": "Estimate the memory usage of a key", + "summary": "Estimates the 
memory usage of a key.", "since": "4.0.0", "group": "server", "complexity": "O(N) where N is the number of samples.", @@ -8485,11 +9133,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "token": "SAMPLES", "optional": true } @@ -8499,7 +9149,7 @@ ] }, "MGET": { - "summary": "Get the values of all the given keys", + "summary": "Atomically returns the string values of one or more keys.", "since": "1.0.0", "group": "string", "complexity": "O(N) where N is the number of keys to retrieve.", @@ -8533,6 +9183,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -8546,7 +9197,7 @@ ] }, "MIGRATE": { - "summary": "Atomically transfer a key from a Redis instance to another one.", + "summary": "Atomically transfers a key from one Redis instance to another.", "since": "2.6.0", "group": "generic", "complexity": "This command actually executes a DUMP+DEL in the source instance, and a RESTORE in the target instance. See the pages of these commands for time complexity. 
Also an O(N) data transfer between the two instances is performed.", @@ -8620,39 +9271,46 @@ "arguments": [ { "name": "host", - "type": "string" + "type": "string", + "display_text": "host" }, { "name": "port", - "type": "integer" + "type": "integer", + "display_text": "port" }, { - "name": "key_or_empty_string", + "name": "key-selector", "type": "oneof", "arguments": [ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "empty_string", + "name": "empty-string", "type": "pure-token", + "display_text": "empty-string", "token": "" } ] }, { "name": "destination-db", - "type": "integer" + "type": "integer", + "display_text": "destination-db" }, { "name": "timeout", - "type": "integer" + "type": "integer", + "display_text": "timeout" }, { "name": "copy", "type": "pure-token", + "display_text": "copy", "token": "COPY", "since": "3.0.0", "optional": true @@ -8660,6 +9318,7 @@ { "name": "replace", "type": "pure-token", + "display_text": "replace", "token": "REPLACE", "since": "3.0.0", "optional": true @@ -8670,34 +9329,36 @@ "optional": true, "arguments": [ { - "name": "password", + "name": "auth", "type": "string", + "display_text": "password", "token": "AUTH", - "since": "4.0.7", - "optional": true + "since": "4.0.7" }, { - "name": "username_password", + "name": "auth2", "type": "block", "token": "AUTH2", "since": "6.0.0", - "optional": true, "arguments": [ { "name": "username", - "type": "string" + "type": "string", + "display_text": "username" }, { "name": "password", - "type": "string" + "type": "string", + "display_text": "password" } ] } ] }, { - "name": "key", + "name": "keys", "type": "key", + "display_text": "key", "key_spec_index": 1, "token": "KEYS", "since": "3.0.6", @@ -8714,7 +9375,7 @@ ] }, "MODULE": { - "summary": "A container for module commands", + "summary": "A container for module commands.", "since": "4.0.0", "group": "server", "complexity": "Depends on subcommand.", @@ -8724,7 +9385,7 @@ "arity": -2 }, "MODULE 
HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": "server", "complexity": "O(1)", @@ -8738,7 +9399,7 @@ ] }, "MODULE LIST": { - "summary": "List all modules loaded by the server", + "summary": "Returns all loaded modules.", "since": "4.0.0", "group": "server", "complexity": "O(N) where N is the number of loaded modules.", @@ -8757,7 +9418,7 @@ ] }, "MODULE LOAD": { - "summary": "Load a module", + "summary": "Loads a module.", "since": "4.0.0", "group": "server", "complexity": "O(1)", @@ -8770,11 +9431,13 @@ "arguments": [ { "name": "path", - "type": "string" + "type": "string", + "display_text": "path" }, { "name": "arg", "type": "string", + "display_text": "arg", "optional": true, "multiple": true } @@ -8786,7 +9449,7 @@ ] }, "MODULE LOADEX": { - "summary": "Load a module with extended parameters", + "summary": "Loads a module using extended parameters.", "since": "7.0.0", "group": "server", "complexity": "O(1)", @@ -8799,7 +9462,8 @@ "arguments": [ { "name": "path", - "type": "string" + "type": "string", + "display_text": "path" }, { "name": "configs", @@ -8811,26 +9475,23 @@ "arguments": [ { "name": "name", - "type": "string" + "type": "string", + "display_text": "name" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] }, { "name": "args", - "type": "block", + "type": "string", + "display_text": "args", "token": "ARGS", "optional": true, - "multiple": true, - "arguments": [ - { - "name": "arg", - "type": "string" - } - ] + "multiple": true } ], "command_flags": [ @@ -8840,7 +9501,7 @@ ] }, "MODULE UNLOAD": { - "summary": "Unload a module", + "summary": "Unloads a module.", "since": "4.0.0", "group": "server", "complexity": "O(1)", @@ -8853,7 +9514,8 @@ "arguments": [ { "name": "name", - "type": "string" + "type": "string", + "display_text": "name" } ], "command_flags": [ @@ -8863,7 +9525,7 @@ ] }, 
"MONITOR": { - "summary": "Listen for all requests received by the server in real time", + "summary": "Listens for all requests received by the server in real-time.", "since": "1.0.0", "group": "server", "acl_categories": [ @@ -8880,7 +9542,7 @@ ] }, "MOVE": { - "summary": "Move a key to another database", + "summary": "Moves a key to another database.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -8915,11 +9577,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "db", - "type": "integer" + "type": "integer", + "display_text": "db" } ], "command_flags": [ @@ -8928,7 +9592,7 @@ ] }, "MSET": { - "summary": "Set multiple keys to multiple values", + "summary": "Atomically creates or modifies the string values of one or more keys.", "since": "1.0.1", "group": "string", "complexity": "O(N) where N is the number of keys to set.", @@ -8960,18 +9624,20 @@ ], "arguments": [ { - "name": "key_value", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] } @@ -8986,7 +9652,7 @@ ] }, "MSETNX": { - "summary": "Set multiple keys to multiple values, only if none of the keys exist", + "summary": "Atomically modifies the string values of one or more keys only when all keys don't exist.", "since": "1.0.1", "group": "string", "complexity": "O(N) where N is the number of keys to set.", @@ -9018,18 +9684,20 @@ ], "arguments": [ { - "name": "key_value", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] } @@ -9037,14 +9705,10 @@ "command_flags": [ "write", "denyoom" - ], - "hints": [ - "request_policy:multi_shard", - "response_policy:agg_min" ] }, "MULTI": { - 
"summary": "Mark the start of a transaction block", + "summary": "Starts a transaction.", "since": "1.2.0", "group": "transactions", "complexity": "O(1)", @@ -9062,7 +9726,7 @@ ] }, "OBJECT": { - "summary": "A container for object introspection commands", + "summary": "A container for object introspection commands.", "since": "2.2.3", "group": "generic", "complexity": "Depends on subcommand.", @@ -9072,7 +9736,7 @@ "arity": -2 }, "OBJECT ENCODING": { - "summary": "Inspect the internal encoding of a Redis object", + "summary": "Returns the internal encoding of a Redis object.", "since": "2.2.3", "group": "generic", "complexity": "O(1)", @@ -9105,6 +9769,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9116,7 +9781,7 @@ ] }, "OBJECT FREQ": { - "summary": "Get the logarithmic access frequency counter of a Redis object", + "summary": "Returns the logarithmic access frequency counter of a Redis object.", "since": "4.0.0", "group": "generic", "complexity": "O(1)", @@ -9149,6 +9814,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9160,7 +9826,7 @@ ] }, "OBJECT HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "6.2.0", "group": "generic", "complexity": "O(1)", @@ -9175,7 +9841,7 @@ ] }, "OBJECT IDLETIME": { - "summary": "Get the time since a Redis object was last accessed", + "summary": "Returns the time since the last access to a Redis object.", "since": "2.2.3", "group": "generic", "complexity": "O(1)", @@ -9208,6 +9874,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9219,7 +9886,7 @@ ] }, "OBJECT REFCOUNT": { - "summary": "Get the number of references to the value of the key", + "summary": "Returns the reference count of a value of a key.", "since": "2.2.3", "group": "generic", "complexity": "O(1)", @@ -9252,6 +9919,7 @@ { "name": "key", 
"type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9263,7 +9931,7 @@ ] }, "PERSIST": { - "summary": "Remove the expiration from a key", + "summary": "Removes the expiration time of a key.", "since": "2.2.0", "group": "generic", "complexity": "O(1)", @@ -9297,6 +9965,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9306,7 +9975,7 @@ ] }, "PEXPIRE": { - "summary": "Set a key's time to live in milliseconds", + "summary": "Sets the expiration time of a key in milliseconds.", "since": "2.6.0", "group": "generic", "complexity": "O(1)", @@ -9346,11 +10015,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "milliseconds", - "type": "integer" + "type": "integer", + "display_text": "milliseconds" }, { "name": "condition", @@ -9361,21 +10032,25 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" }, { "name": "gt", "type": "pure-token", + "display_text": "gt", "token": "GT" }, { "name": "lt", "type": "pure-token", + "display_text": "lt", "token": "LT" } ] @@ -9387,7 +10062,7 @@ ] }, "PEXPIREAT": { - "summary": "Set the expiration for a key as a UNIX timestamp specified in milliseconds", + "summary": "Sets the expiration time of a key to a Unix milliseconds timestamp.", "since": "2.6.0", "group": "generic", "complexity": "O(1)", @@ -9427,11 +10102,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "unix-time-milliseconds", - "type": "unix-time" + "type": "unix-time", + "display_text": "unix-time-milliseconds" }, { "name": "condition", @@ -9442,21 +10119,25 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" }, { "name": "gt", "type": "pure-token", + "display_text": "gt", "token": "GT" }, { "name": "lt", "type": 
"pure-token", + "display_text": "lt", "token": "LT" } ] @@ -9468,7 +10149,7 @@ ] }, "PEXPIRETIME": { - "summary": "Get the expiration Unix timestamp for a key in milliseconds", + "summary": "Returns the expiration time of a key as a Unix milliseconds timestamp.", "since": "7.0.0", "group": "generic", "complexity": "O(1)", @@ -9502,6 +10183,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9511,7 +10193,7 @@ ] }, "PFADD": { - "summary": "Adds the specified elements to the specified HyperLogLog.", + "summary": "Adds elements to a HyperLogLog key. Creates the key if it doesn't exist.", "since": "2.8.9", "group": "hyperloglog", "complexity": "O(1) to add every element.", @@ -9545,11 +10227,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "element", "type": "string", + "display_text": "element", "optional": true, "multiple": true } @@ -9561,7 +10245,7 @@ ] }, "PFCOUNT": { - "summary": "Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).", + "summary": "Returns the approximated cardinality of the set(s) observed by the HyperLogLog key(s).", "since": "2.8.9", "group": "hyperloglog", "complexity": "O(1) with a very small average constant time when called with a single key. 
O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys.", @@ -9596,6 +10280,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -9605,7 +10290,7 @@ ] }, "PFDEBUG": { - "summary": "Internal commands for debugging HyperLogLog values", + "summary": "Internal commands for debugging HyperLogLog values.", "since": "2.8.9", "group": "hyperloglog", "complexity": "N/A", @@ -9640,11 +10325,13 @@ "arguments": [ { "name": "subcommand", - "type": "string" + "type": "string", + "display_text": "subcommand" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9658,7 +10345,7 @@ ] }, "PFMERGE": { - "summary": "Merge N different HyperLogLogs into a single one.", + "summary": "Merges one or more HyperLogLog values into a single key.", "since": "2.8.9", "group": "hyperloglog", "complexity": "O(N) to merge N HyperLogLogs, but with high constant times.", @@ -9711,12 +10398,15 @@ { "name": "destkey", "type": "key", + "display_text": "destkey", "key_spec_index": 0 }, { "name": "sourcekey", "type": "key", + "display_text": "sourcekey", "key_spec_index": 1, + "optional": true, "multiple": true } ], @@ -9726,7 +10416,7 @@ ] }, "PFSELFTEST": { - "summary": "An internal command for testing HyperLogLog values", + "summary": "An internal command for testing HyperLogLog values.", "since": "2.8.9", "group": "hyperloglog", "complexity": "N/A", @@ -9745,7 +10435,7 @@ ] }, "PING": { - "summary": "Ping the server", + "summary": "Returns the server's liveliness response.", "since": "1.0.0", "group": "connection", "complexity": "O(1)", @@ -9758,6 +10448,7 @@ { "name": "message", "type": "string", + "display_text": "message", "optional": true } ], @@ -9770,10 +10461,12 @@ ] }, "PSETEX": { - "summary": "Set the value and expiration in milliseconds of a key", + "summary": "Sets both string value and expiration time in milliseconds of a key. 
The key is created if it doesn't exist.", "since": "2.6.0", "group": "string", "complexity": "O(1)", + "deprecated_since": "2.6.12", + "replaced_by": "`SET` with the `PX` argument", "acl_categories": [ "@write", "@string", @@ -9804,27 +10497,33 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "milliseconds", - "type": "integer" + "type": "integer", + "display_text": "milliseconds" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ "write", "denyoom" + ], + "doc_flags": [ + "deprecated" ] }, "PSUBSCRIBE": { - "summary": "Listen for messages published to channels matching the given patterns", + "summary": "Listens for messages published to channels that match one or more patterns.", "since": "2.0.0", "group": "pubsub", - "complexity": "O(N) where N is the number of patterns the client is already subscribed to.", + "complexity": "O(N) where N is the number of patterns to subscribe to.", "acl_categories": [ "@pubsub", "@slow" @@ -9833,14 +10532,9 @@ "arguments": [ { "name": "pattern", - "type": "block", - "multiple": true, - "arguments": [ - { - "name": "pattern", - "type": "pattern" - } - ] + "type": "pattern", + "display_text": "pattern", + "multiple": true } ], "command_flags": [ @@ -9851,7 +10545,7 @@ ] }, "PSYNC": { - "summary": "Internal command used for replication", + "summary": "An internal command used in replication.", "since": "2.8.0", "group": "server", "acl_categories": [ @@ -9863,11 +10557,13 @@ "arguments": [ { "name": "replicationid", - "type": "string" + "type": "string", + "display_text": "replicationid" }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" } ], "command_flags": [ @@ -9878,7 +10574,7 @@ ] }, "PTTL": { - "summary": "Get the time to live for a key in milliseconds", + "summary": "Returns the expiration time in milliseconds of a key.", "since": "2.6.0", "group": "generic", "complexity": "O(1)", @@ 
-9918,6 +10614,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -9930,7 +10627,7 @@ ] }, "PUBLISH": { - "summary": "Post a message to a channel", + "summary": "Posts a message to a channel.", "since": "2.0.0", "group": "pubsub", "complexity": "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client).", @@ -9942,11 +10639,13 @@ "arguments": [ { "name": "channel", - "type": "string" + "type": "string", + "display_text": "channel" }, { "name": "message", - "type": "string" + "type": "string", + "display_text": "message" } ], "command_flags": [ @@ -9957,7 +10656,7 @@ ] }, "PUBSUB": { - "summary": "A container for Pub/Sub commands", + "summary": "A container for Pub/Sub commands.", "since": "2.8.0", "group": "pubsub", "complexity": "Depends on subcommand.", @@ -9967,7 +10666,7 @@ "arity": -2 }, "PUBSUB CHANNELS": { - "summary": "List active channels", + "summary": "Returns the active channels.", "since": "2.8.0", "group": "pubsub", "complexity": "O(N) where N is the number of active channels, and assuming constant time pattern matching (relatively short channels and patterns)", @@ -9980,6 +10679,7 @@ { "name": "pattern", "type": "pattern", + "display_text": "pattern", "optional": true } ], @@ -9990,7 +10690,7 @@ ] }, "PUBSUB HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "6.2.0", "group": "pubsub", "complexity": "O(1)", @@ -10004,7 +10704,7 @@ ] }, "PUBSUB NUMPAT": { - "summary": "Get the count of unique patterns pattern subscriptions", + "summary": "Returns a count of unique pattern subscriptions.", "since": "2.8.0", "group": "pubsub", "complexity": "O(1)", @@ -10020,7 +10720,7 @@ ] }, "PUBSUB NUMSUB": { - "summary": "Get the count of subscribers for channels", + "summary": "Returns a count of subscribers to channels.", "since": "2.8.0", 
"group": "pubsub", "complexity": "O(N) for the NUMSUB subcommand, where N is the number of requested channels", @@ -10033,6 +10733,7 @@ { "name": "channel", "type": "string", + "display_text": "channel", "optional": true, "multiple": true } @@ -10044,7 +10745,7 @@ ] }, "PUBSUB SHARDCHANNELS": { - "summary": "List active shard channels", + "summary": "Returns the active shard channels.", "since": "7.0.0", "group": "pubsub", "complexity": "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short shard channels).", @@ -10057,6 +10758,7 @@ { "name": "pattern", "type": "pattern", + "display_text": "pattern", "optional": true } ], @@ -10067,7 +10769,7 @@ ] }, "PUBSUB SHARDNUMSUB": { - "summary": "Get the count of subscribers for shard channels", + "summary": "Returns the count of subscribers of shard channels.", "since": "7.0.0", "group": "pubsub", "complexity": "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested shard channels", @@ -10080,6 +10782,7 @@ { "name": "shardchannel", "type": "string", + "display_text": "shardchannel", "optional": true, "multiple": true } @@ -10091,10 +10794,10 @@ ] }, "PUNSUBSCRIBE": { - "summary": "Stop listening for messages posted to channels matching the given patterns", + "summary": "Stops listening to messages published to channels that match one or more patterns.", "since": "2.0.0", "group": "pubsub", - "complexity": "O(N+M) where N is the number of patterns the client is already subscribed and M is the number of total patterns subscribed in the system (by any client).", + "complexity": "O(N) where N is the number of patterns to unsubscribe.", "acl_categories": [ "@pubsub", "@slow" @@ -10104,6 +10807,7 @@ { "name": "pattern", "type": "pattern", + "display_text": "pattern", "optional": true, "multiple": true } @@ -10116,10 +10820,12 @@ ] }, "QUIT": { - "summary": "Close the connection", + "summary": "Closes the connection.", "since": "1.0.0", "group": 
"connection", "complexity": "O(1)", + "deprecated_since": "7.2.0", + "replaced_by": "just closing the connection", "acl_categories": [ "@fast", "@connection" @@ -10132,10 +10838,13 @@ "fast", "no_auth", "allow_busy" + ], + "doc_flags": [ + "deprecated" ] }, "RANDOMKEY": { - "summary": "Return a random key from the keyspace", + "summary": "Returns a random key name from the database.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -10150,11 +10859,12 @@ ], "hints": [ "request_policy:all_shards", + "response_policy:special", "nondeterministic_output" ] }, "READONLY": { - "summary": "Enables read queries for a connection to a cluster replica node", + "summary": "Enables read-only queries for a connection to a Redis Cluster replica node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -10170,7 +10880,7 @@ ] }, "READWRITE": { - "summary": "Disables read queries for a connection to a cluster replica node", + "summary": "Enables read-write queries for a connection to a Reids Cluster replica node.", "since": "3.0.0", "group": "cluster", "complexity": "O(1)", @@ -10186,7 +10896,7 @@ ] }, "RENAME": { - "summary": "Rename a key", + "summary": "Renames a key and overwrites the destination.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -10239,11 +10949,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "newkey", "type": "key", + "display_text": "newkey", "key_spec_index": 1 } ], @@ -10252,7 +10964,7 @@ ] }, "RENAMENX": { - "summary": "Rename a key, only if the new key does not exist", + "summary": "Renames a key only when the target key name doesn't exist.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -10311,11 +11023,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "newkey", "type": "key", + "display_text": "newkey", "key_spec_index": 1 } ], @@ -10325,7 +11039,7 @@ ] }, "REPLCONF": { - "summary": "An internal 
command for configuring the replication stream", + "summary": "An internal command for configuring the replication stream.", "since": "3.0.0", "group": "server", "complexity": "O(1)", @@ -10347,7 +11061,7 @@ ] }, "REPLICAOF": { - "summary": "Make the server a replica of another instance, or promote it as master.", + "summary": "Configures a server as replica of another, or promotes it to a master.", "since": "5.0.0", "group": "server", "complexity": "O(1)", @@ -10359,12 +11073,44 @@ "arity": 3, "arguments": [ { - "name": "host", - "type": "string" - }, - { - "name": "port", - "type": "integer" + "name": "args", + "type": "oneof", + "arguments": [ + { + "name": "host-port", + "type": "block", + "arguments": [ + { + "name": "host", + "type": "string", + "display_text": "host" + }, + { + "name": "port", + "type": "integer", + "display_text": "port" + } + ] + }, + { + "name": "no-one", + "type": "block", + "arguments": [ + { + "name": "no", + "type": "pure-token", + "display_text": "no", + "token": "NO" + }, + { + "name": "one", + "type": "pure-token", + "display_text": "one", + "token": "ONE" + } + ] + } + ] } ], "command_flags": [ @@ -10375,7 +11121,7 @@ ] }, "RESET": { - "summary": "Reset the connection", + "summary": "Resets the connection.", "since": "6.2.0", "group": "connection", "complexity": "O(1)", @@ -10394,7 +11140,7 @@ ] }, "RESTORE": { - "summary": "Create a key using the provided serialized value, previously obtained using DUMP.", + "summary": "Creates a key from the serialized representation of a value.", "since": "2.6.0", "group": "generic", "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", @@ -10443,19 +11189,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "ttl", - "type": "integer" + "type": "integer", + "display_text": "ttl" }, { "name": "serialized-value", - "type": "string" + "type": "string", + "display_text": "serialized-value" }, { "name": "replace", "type": "pure-token", + "display_text": "replace", "token": "REPLACE", "since": "3.0.0", "optional": true @@ -10463,6 +11213,7 @@ { "name": "absttl", "type": "pure-token", + "display_text": "absttl", "token": "ABSTTL", "since": "5.0.0", "optional": true @@ -10470,6 +11221,7 @@ { "name": "seconds", "type": "integer", + "display_text": "seconds", "token": "IDLETIME", "since": "5.0.0", "optional": true @@ -10477,6 +11229,7 @@ { "name": "frequency", "type": "integer", + "display_text": "frequency", "token": "FREQ", "since": "5.0.0", "optional": true @@ -10488,7 +11241,7 @@ ] }, "RESTORE-ASKING": { - "summary": "An internal command for migrating keys in a cluster", + "summary": "An internal command for migrating keys in a cluster.", "since": "3.0.0", "group": "server", "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", @@ -10537,19 +11290,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "ttl", - "type": "integer" + "type": "integer", + "display_text": "ttl" }, { "name": "serialized-value", - "type": "string" + "type": "string", + "display_text": "serialized-value" }, { "name": "replace", "type": "pure-token", + "display_text": "replace", "token": "REPLACE", "since": "3.0.0", "optional": true @@ -10557,6 +11314,7 @@ { "name": "absttl", "type": "pure-token", + "display_text": "absttl", "token": "ABSTTL", "since": "5.0.0", "optional": true @@ -10564,6 +11322,7 @@ { "name": "seconds", "type": "integer", + "display_text": "seconds", "token": "IDLETIME", "since": "5.0.0", "optional": true @@ -10571,6 +11330,7 @@ { "name": "frequency", "type": "integer", + "display_text": "frequency", "token": "FREQ", "since": "5.0.0", "optional": true @@ -10586,7 +11346,7 @@ ] }, "ROLE": { - "summary": "Return the role of the instance in the context of replication", + "summary": "Returns the replication role.", "since": "2.8.12", "group": "server", "complexity": "O(1)", @@ -10604,7 +11364,7 @@ ] }, "RPOP": { - "summary": "Remove and get the last elements in a list", + "summary": "Returns and removes the last elements of a list. Deletes the list if the last element was popped.", "since": "1.0.0", "group": "list", "complexity": "O(N) where N is the number of elements returned", @@ -10645,11 +11405,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "since": "6.2.0", "optional": true } @@ -10660,7 +11422,7 @@ ] }, "RPOPLPUSH": { - "summary": "Remove the last element in a list, prepend it to another list and return it", + "summary": "Returns the last element of a list after removing and pushing it to another list. 
Deletes the list if the last element was popped.", "since": "1.2.0", "group": "list", "complexity": "O(1)", @@ -10715,11 +11477,13 @@ { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 0 }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 1 } ], @@ -10732,7 +11496,7 @@ ] }, "RPUSH": { - "summary": "Append one or multiple elements to a list", + "summary": "Appends one or more elements to a list. Creates the key if it doesn't exist.", "since": "1.0.0", "group": "list", "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", @@ -10772,11 +11536,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "element", "type": "string", + "display_text": "element", "multiple": true } ], @@ -10787,7 +11553,7 @@ ] }, "RPUSHX": { - "summary": "Append an element to a list, only if the list exists", + "summary": "Appends an element to a list only when the list exists.", "since": "2.2.0", "group": "list", "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", @@ -10827,11 +11593,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "element", "type": "string", + "display_text": "element", "multiple": true } ], @@ -10842,7 +11610,7 @@ ] }, "SADD": { - "summary": "Add one or more members to a set", + "summary": "Adds one or more members to a set. 
Creates the key if it doesn't exist.", "since": "1.0.0", "group": "set", "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", @@ -10882,11 +11650,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", "multiple": true } ], @@ -10897,7 +11667,7 @@ ] }, "SAVE": { - "summary": "Synchronously save the dataset to disk", + "summary": "Synchronously saves the database(s) to disk.", "since": "1.0.0", "group": "server", "complexity": "O(N) where N is the total number of keys in all databases", @@ -10915,7 +11685,7 @@ ] }, "SCAN": { - "summary": "Incrementally iterate the keys space", + "summary": "Iterates over the key names in the database.", "since": "2.8.0", "group": "generic", "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.", @@ -10934,23 +11704,27 @@ "arguments": [ { "name": "cursor", - "type": "integer" + "type": "integer", + "display_text": "cursor" }, { "name": "pattern", "type": "pattern", + "display_text": "pattern", "token": "MATCH", "optional": true }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true }, { "name": "type", "type": "string", + "display_text": "type", "token": "TYPE", "since": "6.0.0", "optional": true @@ -10961,11 +11735,12 @@ ], "hints": [ "nondeterministic_output", - "request_policy:special" + "request_policy:special", + "response_policy:special" ] }, "SCARD": { - "summary": "Get the number of members in a set", + "summary": "Returns the number of members in a set.", "since": "1.0.0", "group": "set", "complexity": "O(1)", @@ -10998,6 +11773,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -11007,7 +11783,7 @@ ] }, "SCRIPT": { - "summary": "A container for 
Lua scripts management commands", + "summary": "A container for Lua scripts management commands.", "since": "2.6.0", "group": "scripting", "complexity": "Depends on subcommand.", @@ -11017,7 +11793,7 @@ "arity": -2 }, "SCRIPT DEBUG": { - "summary": "Set the debug mode for executed scripts.", + "summary": "Sets the debug mode of server-side Lua scripts.", "since": "3.2.0", "group": "scripting", "complexity": "O(1)", @@ -11034,16 +11810,19 @@ { "name": "yes", "type": "pure-token", + "display_text": "yes", "token": "YES" }, { "name": "sync", "type": "pure-token", + "display_text": "sync", "token": "SYNC" }, { "name": "no", "type": "pure-token", + "display_text": "no", "token": "NO" } ] @@ -11054,7 +11833,7 @@ ] }, "SCRIPT EXISTS": { - "summary": "Check existence of scripts in the script cache.", + "summary": "Determines whether server-side Lua scripts exist in the script cache.", "since": "2.6.0", "group": "scripting", "complexity": "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation).", @@ -11067,6 +11846,7 @@ { "name": "sha1", "type": "string", + "display_text": "sha1", "multiple": true } ], @@ -11079,7 +11859,7 @@ ] }, "SCRIPT FLUSH": { - "summary": "Remove all the scripts from the script cache.", + "summary": "Removes all server-side Lua scripts from the script cache.", "since": "2.6.0", "group": "scripting", "complexity": "O(N) with N being the number of scripts in cache", @@ -11096,7 +11876,7 @@ "arity": -2, "arguments": [ { - "name": "async", + "name": "flush-type", "type": "oneof", "since": "6.2.0", "optional": true, @@ -11104,11 +11884,13 @@ { "name": "async", "type": "pure-token", + "display_text": "async", "token": "ASYNC" }, { "name": "sync", "type": "pure-token", + "display_text": "sync", "token": "SYNC" } ] @@ -11123,7 +11905,7 @@ ] }, "SCRIPT HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", 
"group": "scripting", "complexity": "O(1)", @@ -11138,7 +11920,7 @@ ] }, "SCRIPT KILL": { - "summary": "Kill the script currently in execution.", + "summary": "Terminates a server-side Lua script during execution.", "since": "2.6.0", "group": "scripting", "complexity": "O(1)", @@ -11157,7 +11939,7 @@ ] }, "SCRIPT LOAD": { - "summary": "Load the specified Lua script into the script cache.", + "summary": "Loads a server-side Lua script to the script cache.", "since": "2.6.0", "group": "scripting", "complexity": "O(N) with N being the length in bytes of the script body.", @@ -11169,7 +11951,8 @@ "arguments": [ { "name": "script", - "type": "string" + "type": "string", + "display_text": "script" } ], "command_flags": [ @@ -11182,7 +11965,7 @@ ] }, "SDIFF": { - "summary": "Subtract multiple sets", + "summary": "Returns the difference of multiple sets.", "since": "1.0.0", "group": "set", "complexity": "O(N) where N is the total number of elements in all given sets.", @@ -11216,6 +11999,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -11228,7 +12012,7 @@ ] }, "SDIFFSTORE": { - "summary": "Subtract multiple sets and store the resulting set in a key", + "summary": "Stores the difference of multiple sets in a key.", "since": "1.0.0", "group": "set", "complexity": "O(N) where N is the total number of elements in all given sets.", @@ -11280,11 +12064,13 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true } @@ -11295,7 +12081,7 @@ ] }, "SELECT": { - "summary": "Change the selected database for the current connection", + "summary": "Changes the selected database.", "since": "1.0.0", "group": "connection", "complexity": "O(1)", @@ -11307,7 +12093,8 @@ "arguments": [ { "name": "index", - "type": "integer" + "type": "integer", + "display_text": "index" } ], "command_flags": [ @@ 
-11317,7 +12104,7 @@ ] }, "SET": { - "summary": "Set the string value of a key", + "summary": "Sets the string value of a key, ignoring its type. The key is created if it doesn't exist.", "since": "1.0.0", "group": "string", "complexity": "O(1)", @@ -11372,11 +12159,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" }, { "name": "condition", @@ -11387,11 +12176,13 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" } ] @@ -11399,6 +12190,7 @@ { "name": "get", "type": "pure-token", + "display_text": "get", "token": "GET", "since": "6.2.0", "optional": true @@ -11411,30 +12203,35 @@ { "name": "seconds", "type": "integer", + "display_text": "seconds", "token": "EX", "since": "2.6.12" }, { "name": "milliseconds", "type": "integer", + "display_text": "milliseconds", "token": "PX", "since": "2.6.12" }, { "name": "unix-time-seconds", "type": "unix-time", + "display_text": "unix-time-seconds", "token": "EXAT", "since": "6.2.0" }, { "name": "unix-time-milliseconds", "type": "unix-time", + "display_text": "unix-time-milliseconds", "token": "PXAT", "since": "6.2.0" }, { "name": "keepttl", "type": "pure-token", + "display_text": "keepttl", "token": "KEEPTTL", "since": "6.0.0" } @@ -11447,7 +12244,7 @@ ] }, "SETBIT": { - "summary": "Sets or clears the bit at offset in the string value stored at key", + "summary": "Sets or clears the bit at offset of the string value. 
Creates the key if it doesn't exist.", "since": "2.2.0", "group": "bitmap", "complexity": "O(1)", @@ -11482,15 +12279,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "value", - "type": "integer" + "type": "integer", + "display_text": "value" } ], "command_flags": [ @@ -11499,10 +12299,12 @@ ] }, "SETEX": { - "summary": "Set the value and expiration of a key", + "summary": "Sets the string value and expiration time of a key. Creates the key if it doesn't exist.", "since": "2.0.0", "group": "string", "complexity": "O(1)", + "deprecated_since": "2.6.12", + "replaced_by": "`SET` with the `EX` argument", "acl_categories": [ "@write", "@string", @@ -11533,27 +12335,35 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "seconds", - "type": "integer" + "type": "integer", + "display_text": "seconds" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ "write", "denyoom" + ], + "doc_flags": [ + "deprecated" ] }, "SETNX": { - "summary": "Set the value of a key, only if the key does not exist", + "summary": "Set the string value of a key only when the key doesn't exist.", "since": "1.0.0", "group": "string", "complexity": "O(1)", + "deprecated_since": "2.6.12", + "replaced_by": "`SET` with the `NX` argument", "acl_categories": [ "@write", "@string", @@ -11584,21 +12394,26 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ "write", "denyoom", "fast" + ], + "doc_flags": [ + "deprecated" ] }, "SETRANGE": { - "summary": "Overwrite part of a string at key starting at the specified offset", + "summary": "Overwrites a part of a string value with another by an offset. 
Creates the key if it doesn't exist.", "since": "2.2.0", "group": "string", "complexity": "O(1), not counting the time taken to copy the new string in place. Usually, this string is very small so the amortized complexity is O(1). Otherwise, complexity is O(M) with M being the length of the value argument.", @@ -11632,15 +12447,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ], "command_flags": [ @@ -11649,7 +12467,7 @@ ] }, "SHUTDOWN": { - "summary": "Synchronously save the dataset to disk and then shut down the server", + "summary": "Synchronously saves the database(s) to disk and shuts down the Redis server.", "since": "1.0.0", "group": "server", "complexity": "O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)", @@ -11667,18 +12485,20 @@ "arity": -1, "arguments": [ { - "name": "nosave_save", + "name": "save-selector", "type": "oneof", "optional": true, "arguments": [ { "name": "nosave", "type": "pure-token", + "display_text": "nosave", "token": "NOSAVE" }, { "name": "save", "type": "pure-token", + "display_text": "save", "token": "SAVE" } ] @@ -11686,6 +12506,7 @@ { "name": "now", "type": "pure-token", + "display_text": "now", "token": "NOW", "since": "7.0.0", "optional": true @@ -11693,6 +12514,7 @@ { "name": "force", "type": "pure-token", + "display_text": "force", "token": "FORCE", "since": "7.0.0", "optional": true @@ -11700,6 +12522,7 @@ { "name": "abort", "type": "pure-token", + "display_text": "abort", "token": "ABORT", "since": "7.0.0", "optional": true @@ -11715,7 +12538,7 @@ ] }, "SINTER": { - "summary": "Intersect multiple sets", + "summary": "Returns the intersect of multiple sets.", "since": "1.0.0", "group": "set", "complexity": "O(N*M) worst case where N is the cardinality of the 
smallest set and M is the number of sets.", @@ -11749,6 +12572,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -11761,7 +12585,7 @@ ] }, "SINTERCARD": { - "summary": "Intersect multiple sets and return the cardinality of the result", + "summary": "Returns the number of members of the intersect of multiple sets.", "since": "7.0.0", "group": "set", "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", @@ -11794,17 +12618,20 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "limit", "type": "integer", + "display_text": "limit", "token": "LIMIT", "optional": true } @@ -11815,7 +12642,7 @@ ] }, "SINTERSTORE": { - "summary": "Intersect multiple sets and store the resulting set in a key", + "summary": "Stores the intersect of multiple sets in a key.", "since": "1.0.0", "group": "set", "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", @@ -11867,11 +12694,13 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true } @@ -11882,7 +12711,7 @@ ] }, "SISMEMBER": { - "summary": "Determine if a given value is a member of a set", + "summary": "Determines whether a member belongs to a set.", "since": "1.0.0", "group": "set", "complexity": "O(1)", @@ -11915,11 +12744,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" } ], "command_flags": [ @@ -11928,7 +12759,7 @@ ] }, "SLAVEOF": { - "summary": "Make the server a replica of another instance, or promote it as master.", + "summary": "Sets a 
Redis server as a replica of another, or promotes it to being a master.", "since": "1.0.0", "group": "server", "complexity": "O(1)", @@ -11942,12 +12773,44 @@ "arity": 3, "arguments": [ { - "name": "host", - "type": "string" - }, - { - "name": "port", - "type": "integer" + "name": "args", + "type": "oneof", + "arguments": [ + { + "name": "host-port", + "type": "block", + "arguments": [ + { + "name": "host", + "type": "string", + "display_text": "host" + }, + { + "name": "port", + "type": "integer", + "display_text": "port" + } + ] + }, + { + "name": "no-one", + "type": "block", + "arguments": [ + { + "name": "no", + "type": "pure-token", + "display_text": "no", + "token": "NO" + }, + { + "name": "one", + "type": "pure-token", + "display_text": "one", + "token": "ONE" + } + ] + } + ] } ], "command_flags": [ @@ -11961,7 +12824,7 @@ ] }, "SLOWLOG": { - "summary": "A container for slow log commands", + "summary": "A container for slow log commands.", "since": "2.2.12", "group": "server", "complexity": "Depends on subcommand.", @@ -11971,7 +12834,7 @@ "arity": -2 }, "SLOWLOG GET": { - "summary": "Get the slow log's entries", + "summary": "Returns the slow log's entries.", "since": "2.2.12", "group": "server", "complexity": "O(N) where N is the number of entries returned", @@ -11991,6 +12854,7 @@ { "name": "count", "type": "integer", + "display_text": "count", "optional": true } ], @@ -12019,7 +12883,7 @@ ] }, "SLOWLOG LEN": { - "summary": "Get the slow log's length", + "summary": "Returns the number of entries in the slow log.", "since": "2.2.12", "group": "server", "complexity": "O(1)", @@ -12041,7 +12905,7 @@ ] }, "SLOWLOG RESET": { - "summary": "Clear all entries from the slow log", + "summary": "Clears all entries from the slow log.", "since": "2.2.12", "group": "server", "complexity": "O(N) where N is the number of entries in the slowlog", @@ -12062,7 +12926,7 @@ ] }, "SMEMBERS": { - "summary": "Get all the members in a set", + "summary": "Returns all members of a 
set.", "since": "1.0.0", "group": "set", "complexity": "O(N) where N is the set cardinality.", @@ -12096,6 +12960,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -12107,7 +12972,7 @@ ] }, "SMISMEMBER": { - "summary": "Returns the membership associated with the given elements for a set", + "summary": "Determines whether multiple members belong to a set.", "since": "6.2.0", "group": "set", "complexity": "O(N) where N is the number of elements being checked for membership", @@ -12141,11 +13006,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", "multiple": true } ], @@ -12155,7 +13022,7 @@ ] }, "SMOVE": { - "summary": "Move a member from one set to another", + "summary": "Moves a member from one set to another.", "since": "1.0.0", "group": "set", "complexity": "O(1)", @@ -12208,16 +13075,19 @@ { "name": "source", "type": "key", + "display_text": "source", "key_spec_index": 0 }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 1 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" } ], "command_flags": [ @@ -12226,7 +13096,7 @@ ] }, "SORT": { - "summary": "Sort the elements in a list, set or sorted set", + "summary": "Sorts the elements in a list, a set, or a sorted set, optionally storing the result.", "since": "1.0.0", "group": "generic", "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N).", @@ -12289,34 +13159,39 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "pattern", + "name": "by-pattern", "type": "pattern", + "display_text": "pattern", "key_spec_index": 1, "token": "BY", "optional": true }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] }, { - "name": "pattern", + "name": "get-pattern", "type": "pattern", + "display_text": "pattern", "key_spec_index": 1, "token": "GET", "optional": true, @@ -12331,11 +13206,13 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] @@ -12343,12 +13220,14 @@ { "name": "sorting", "type": "pure-token", + "display_text": "sorting", "token": "ALPHA", "optional": true }, { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 2, "token": "STORE", "optional": true @@ -12361,7 +13240,7 @@ ] }, "SORT_RO": { - "summary": "Sort the elements in a list, set or sorted set. Read-only variant of SORT.", + "summary": "Returns the sorted elements of a list, a set, or a sorted set.", "since": "7.0.0", "group": "generic", "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N).", @@ -12411,34 +13290,39 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "pattern", + "name": "by-pattern", "type": "pattern", + "display_text": "pattern", "key_spec_index": 1, "token": "BY", "optional": true }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] }, { - "name": "pattern", + "name": "get-pattern", "type": "pattern", + "display_text": "pattern", "key_spec_index": 1, "token": "GET", "optional": true, @@ -12453,11 +13337,13 @@ { "name": "asc", "type": "pure-token", + "display_text": "asc", "token": "ASC" }, { "name": "desc", "type": "pure-token", + "display_text": "desc", "token": "DESC" } ] @@ -12465,6 +13351,7 @@ { "name": "sorting", "type": "pure-token", + "display_text": "sorting", "token": "ALPHA", "optional": true } @@ -12475,7 +13362,7 @@ ] }, "SPOP": { - "summary": "Remove and return one or multiple random members from a set", + "summary": "Returns one or more random members from a set after removing them. 
Deletes the set if the last member was popped.", "since": "1.0.0", "group": "set", "complexity": "Without the count argument O(1), otherwise O(N) where N is the value of the passed count.", @@ -12516,11 +13403,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "since": "3.2.0", "optional": true } @@ -12565,11 +13454,13 @@ "arguments": [ { "name": "shardchannel", - "type": "string" + "type": "string", + "display_text": "shardchannel" }, { "name": "message", - "type": "string" + "type": "string", + "display_text": "message" } ], "command_flags": [ @@ -12620,11 +13511,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "since": "2.6.0", "optional": true } @@ -12637,7 +13530,7 @@ ] }, "SREM": { - "summary": "Remove one or more members from a set", + "summary": "Removes one or more members from a set. Deletes the set if the last member was removed.", "since": "1.0.0", "group": "set", "complexity": "O(N) where N is the number of members to be removed.", @@ -12677,11 +13570,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", "multiple": true } ], @@ -12691,10 +13586,10 @@ ] }, "SSCAN": { - "summary": "Incrementally iterate Set elements", + "summary": "Iterates over members of a set.", "since": "2.8.0", "group": "set", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection.", "acl_categories": [ "@read", "@set", @@ -12725,21 +13620,25 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "cursor", - "type": "integer" + "type": "integer", + "display_text": "cursor" }, { "name": "pattern", "type": "pattern", + "display_text": "pattern", "token": "MATCH", "optional": true }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -12752,7 +13651,7 @@ ] }, "SSUBSCRIBE": { - "summary": "Listen for messages published to the given shard channels", + "summary": "Listens for messages published to shard channels.", "since": "7.0.0", "group": "pubsub", "complexity": "O(N) where N is the number of shard channels to subscribe to.", @@ -12784,6 +13683,7 @@ { "name": "shardchannel", "type": "string", + "display_text": "shardchannel", "multiple": true } ], @@ -12795,7 +13695,7 @@ ] }, "STRLEN": { - "summary": "Get the length of the value stored in a key", + "summary": "Returns the length of a string value.", "since": "2.2.0", "group": "string", "complexity": "O(1)", @@ -12828,6 +13728,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -12837,7 +13738,7 @@ ] }, "SUBSCRIBE": { - "summary": "Listen for messages published to the given channels", + "summary": "Listens for messages published to channels.", "since": "2.0.0", "group": "pubsub", "complexity": "O(N) where N is the number of channels to subscribe to.", @@ -12850,6 +13751,7 @@ { "name": "channel", "type": "string", + "display_text": "channel", "multiple": true } ], @@ -12861,7 +13763,7 @@ ] }, "SUBSTR": { - "summary": "Get a substring of the string stored at a key", + "summary": "Returns a substring from a string value.", "since": "1.0.0", "group": "string", "complexity": "O(N) where N is the length of the returned string. 
The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", @@ -12897,15 +13799,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "end", - "type": "integer" + "type": "integer", + "display_text": "end" } ], "command_flags": [ @@ -12916,7 +13821,7 @@ ] }, "SUNION": { - "summary": "Add multiple sets", + "summary": "Returns the union of multiple sets.", "since": "1.0.0", "group": "set", "complexity": "O(N) where N is the total number of elements in all given sets.", @@ -12950,6 +13855,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -12962,7 +13868,7 @@ ] }, "SUNIONSTORE": { - "summary": "Add multiple sets and store the resulting set in a key", + "summary": "Stores the union of multiple sets in a key.", "since": "1.0.0", "group": "set", "complexity": "O(N) where N is the total number of elements in all given sets.", @@ -13014,11 +13920,13 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true } @@ -13029,10 +13937,10 @@ ] }, "SUNSUBSCRIBE": { - "summary": "Stop listening for messages posted to the given shard channels", + "summary": "Stops listening to messages posted to shard channels.", "since": "7.0.0", "group": "pubsub", - "complexity": "O(N) where N is the number of clients already subscribed to a shard channel.", + "complexity": "O(N) where N is the number of shard channels to unsubscribe.", "acl_categories": [ "@pubsub", "@slow" @@ -13061,6 +13969,7 @@ { "name": "shardchannel", "type": "string", + "display_text": "shardchannel", "optional": true, "multiple": true } @@ -13073,7 +13982,7 @@ ] }, "SWAPDB": { - 
"summary": "Swaps two Redis databases", + "summary": "Swaps two Redis databases.", "since": "4.0.0", "group": "server", "complexity": "O(N) where N is the count of clients watching or blocking on keys from both databases.", @@ -13087,11 +13996,13 @@ "arguments": [ { "name": "index1", - "type": "integer" + "type": "integer", + "display_text": "index1" }, { "name": "index2", - "type": "integer" + "type": "integer", + "display_text": "index2" } ], "command_flags": [ @@ -13100,7 +14011,7 @@ ] }, "SYNC": { - "summary": "Internal command used for replication", + "summary": "An internal command used in replication.", "since": "1.0.0", "group": "server", "acl_categories": [ @@ -13117,7 +14028,7 @@ ] }, "TIME": { - "summary": "Return the current server time", + "summary": "Returns the server time.", "since": "2.6.0", "group": "server", "complexity": "O(1)", @@ -13135,7 +14046,7 @@ ] }, "TOUCH": { - "summary": "Alters the last access time of a key(s). Returns the number of existing keys specified.", + "summary": "Returns the number of existing keys out of those specified after updating the time they were last accessed.", "since": "3.2.1", "group": "generic", "complexity": "O(N) where N is the number of keys that will be touched.", @@ -13168,6 +14079,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -13182,7 +14094,7 @@ ] }, "TTL": { - "summary": "Get the time to live for a key in seconds", + "summary": "Returns the expiration time in seconds of a key.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -13222,6 +14134,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -13234,7 +14147,7 @@ ] }, "TYPE": { - "summary": "Determine the type stored at key", + "summary": "Determines the type of value stored at a key.", "since": "1.0.0", "group": "generic", "complexity": "O(1)", @@ -13267,6 +14180,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 
} ], @@ -13276,7 +14190,7 @@ ] }, "UNLINK": { - "summary": "Delete a key asynchronously in another thread. Otherwise it is just as DEL, but non blocking.", + "summary": "Asynchronously deletes one or more keys.", "since": "4.0.0", "group": "generic", "complexity": "O(1) for each key removed regardless of its size. Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.", @@ -13310,6 +14224,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -13324,10 +14239,10 @@ ] }, "UNSUBSCRIBE": { - "summary": "Stop listening for messages posted to the given channels", + "summary": "Stops listening to messages posted to channels.", "since": "2.0.0", "group": "pubsub", - "complexity": "O(N) where N is the number of clients already subscribed to a channel.", + "complexity": "O(N) where N is the number of channels to unsubscribe.", "acl_categories": [ "@pubsub", "@slow" @@ -13337,6 +14252,7 @@ { "name": "channel", "type": "string", + "display_text": "channel", "optional": true, "multiple": true } @@ -13349,7 +14265,7 @@ ] }, "UNWATCH": { - "summary": "Forget about all watched keys", + "summary": "Forgets about watched keys of a transaction.", "since": "2.2.0", "group": "transactions", "complexity": "O(1)", @@ -13367,7 +14283,7 @@ ] }, "WAIT": { - "summary": "Wait for the synchronous replication of all the write commands sent in the context of the current connection", + "summary": "Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed.", "since": "3.0.0", "group": "generic", "complexity": "O(1)", @@ -13379,11 +14295,45 @@ "arguments": [ { "name": "numreplicas", - "type": "integer" + "type": "integer", + "display_text": "numreplicas" + }, + { + "name": "timeout", + "type": "integer", + "display_text": "timeout" + } + ], + "hints": [ + "request_policy:all_shards", + 
"response_policy:agg_min" + ] + }, + "WAITAOF": { + "summary": "Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the master and/or replicas.", + "since": "7.2.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 4, + "arguments": [ + { + "name": "numlocal", + "type": "integer", + "display_text": "numlocal" + }, + { + "name": "numreplicas", + "type": "integer", + "display_text": "numreplicas" }, { "name": "timeout", - "type": "integer" + "type": "integer", + "display_text": "timeout" } ], "command_flags": [ @@ -13395,7 +14345,7 @@ ] }, "WATCH": { - "summary": "Watch the given keys to determine execution of the MULTI/EXEC block", + "summary": "Monitors changes to keys to determine the execution of a transaction.", "since": "2.2.0", "group": "transactions", "complexity": "O(1) for every key.", @@ -13427,6 +14377,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true } @@ -13440,7 +14391,7 @@ ] }, "XACK": { - "summary": "Marks a pending message as correctly processed, effectively removing it from the pending entries list of the consumer group. Return value of the command is the number of messages successfully acknowledged, that is, the IDs we were actually able to resolve in the PEL.", + "summary": "Returns the number of messages that were successfully acknowledged by the consumer group member of a stream.", "since": "5.0.0", "group": "stream", "complexity": "O(1) for each message ID processed.", @@ -13474,15 +14425,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "group", - "type": "string" + "type": "string", + "display_text": "group" }, { "name": "id", "type": "string", + "display_text": "id", "multiple": true } ], @@ -13492,7 +14446,7 @@ ] }, "XADD": { - "summary": "Appends a new entry to a stream", + "summary": "Appends a new message to a stream. 
Creates the key if it doesn't exist.", "since": "5.0.0", "group": "stream", "complexity": "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted.", @@ -13537,11 +14491,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "nomkstream", "type": "pure-token", + "display_text": "nomkstream", "token": "NOMKSTREAM", "since": "6.2.0", "optional": true @@ -13558,11 +14514,13 @@ { "name": "maxlen", "type": "pure-token", + "display_text": "maxlen", "token": "MAXLEN" }, { "name": "minid", "type": "pure-token", + "display_text": "minid", "token": "MINID", "since": "6.2.0" } @@ -13576,22 +14534,26 @@ { "name": "equal", "type": "pure-token", + "display_text": "equal", "token": "=" }, { "name": "approximately", "type": "pure-token", + "display_text": "approximately", "token": "~" } ] }, { "name": "threshold", - "type": "string" + "type": "string", + "display_text": "threshold" }, { "name": "count", "type": "integer", + "display_text": "count", "token": "LIMIT", "since": "6.2.0", "optional": true @@ -13599,32 +14561,36 @@ ] }, { - "name": "id_or_auto", + "name": "id-selector", "type": "oneof", "arguments": [ { - "name": "auto_id", + "name": "auto-id", "type": "pure-token", + "display_text": "auto-id", "token": "*" }, { "name": "id", - "type": "string" + "type": "string", + "display_text": "id" } ] }, { - "name": "field_value", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "field", - "type": "string" + "type": "string", + "display_text": "field" }, { "name": "value", - "type": "string" + "type": "string", + "display_text": "value" } ] } @@ -13639,7 +14605,7 @@ ] }, "XAUTOCLAIM": { - "summary": "Changes (or acquires) ownership of messages in a consumer group, as if the messages were delivered to the specified consumer.", + "summary": "Changes, or acquires, ownership of messages in a consumer group, as if the messages were delivered to a consumer group member.", "since": 
"6.2.0", "group": "stream", "complexity": "O(1) if COUNT is small.", @@ -13679,33 +14645,40 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "group", - "type": "string" + "type": "string", + "display_text": "group" }, { "name": "consumer", - "type": "string" + "type": "string", + "display_text": "consumer" }, { "name": "min-idle-time", - "type": "string" + "type": "string", + "display_text": "min-idle-time" }, { "name": "start", - "type": "string" + "type": "string", + "display_text": "start" }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true }, { "name": "justid", "type": "pure-token", + "display_text": "justid", "token": "JUSTID", "optional": true } @@ -13719,7 +14692,7 @@ ] }, "XCLAIM": { - "summary": "Changes (or acquires) ownership of a message in a consumer group, as if the message was delivered to the specified consumer.", + "summary": "Changes, or acquires, ownership of a message in a consumer group, as if the message was delivered to a consumer group member.", "since": "5.0.0", "group": "stream", "complexity": "O(log N) with N being the number of messages in the PEL of the consumer group.", @@ -13753,54 +14726,71 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "group", - "type": "string" + "type": "string", + "display_text": "group" }, { "name": "consumer", - "type": "string" + "type": "string", + "display_text": "consumer" }, { "name": "min-idle-time", - "type": "string" + "type": "string", + "display_text": "min-idle-time" }, { "name": "id", "type": "string", + "display_text": "id", "multiple": true }, { "name": "ms", "type": "integer", + "display_text": "ms", "token": "IDLE", "optional": true }, { "name": "unix-time-milliseconds", "type": "unix-time", + "display_text": "unix-time-milliseconds", "token": "TIME", "optional": true }, { "name": "count", "type": "integer", + "display_text": "count", "token": "RETRYCOUNT", 
"optional": true }, { "name": "force", "type": "pure-token", + "display_text": "force", "token": "FORCE", "optional": true }, { "name": "justid", "type": "pure-token", + "display_text": "justid", "token": "JUSTID", "optional": true + }, + { + "name": "lastid", + "type": "string", + "display_text": "lastid", + "token": "LASTID", + "optional": true } ], "command_flags": [ @@ -13812,7 +14802,7 @@ ] }, "XDEL": { - "summary": "Removes the specified entries from the stream. Returns the number of items actually deleted, that may be different from the number of IDs passed in case certain IDs do not exist.", + "summary": "Returns the number of messages after removing them from a stream.", "since": "5.0.0", "group": "stream", "complexity": "O(1) for each single item to delete in the stream, regardless of the stream size.", @@ -13846,11 +14836,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "id", "type": "string", + "display_text": "id", "multiple": true } ], @@ -13860,7 +14852,7 @@ ] }, "XGROUP": { - "summary": "A container for consumer groups commands", + "summary": "A container for consumer groups commands.", "since": "5.0.0", "group": "stream", "complexity": "Depends on subcommand.", @@ -13870,7 +14862,7 @@ "arity": -2 }, "XGROUP CREATE": { - "summary": "Create a consumer group.", + "summary": "Creates a consumer group.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -13910,23 +14902,27 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "groupname", - "type": "string" + "name": "group", + "type": "string", + "display_text": "group" }, { - "name": "id", + "name": "id-selector", "type": "oneof", "arguments": [ { "name": "id", - "type": "string" + "type": "string", + "display_text": "id" }, { - "name": "new_id", + "name": "new-id", "type": "pure-token", + "display_text": "new-id", "token": "$" } ] @@ -13934,12 +14930,14 @@ { "name": "mkstream", "type": "pure-token", + 
"display_text": "mkstream", "token": "MKSTREAM", "optional": true }, { - "name": "entries_read", + "name": "entries-read", "type": "integer", + "display_text": "entries-read", "token": "ENTRIESREAD", "optional": true } @@ -13950,7 +14948,7 @@ ] }, "XGROUP CREATECONSUMER": { - "summary": "Create a consumer in a consumer group.", + "summary": "Creates a consumer in a consumer group.", "since": "6.2.0", "group": "stream", "complexity": "O(1)", @@ -13984,15 +14982,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "groupname", - "type": "string" + "name": "group", + "type": "string", + "display_text": "group" }, { - "name": "consumername", - "type": "string" + "name": "consumer", + "type": "string", + "display_text": "consumer" } ], "command_flags": [ @@ -14001,7 +15002,7 @@ ] }, "XGROUP DELCONSUMER": { - "summary": "Delete a consumer from a consumer group.", + "summary": "Deletes a consumer from a consumer group.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14035,15 +15036,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "groupname", - "type": "string" + "name": "group", + "type": "string", + "display_text": "group" }, { - "name": "consumername", - "type": "string" + "name": "consumer", + "type": "string", + "display_text": "consumer" } ], "command_flags": [ @@ -14051,7 +15055,7 @@ ] }, "XGROUP DESTROY": { - "summary": "Destroy a consumer group.", + "summary": "Destroys a consumer group.", "since": "5.0.0", "group": "stream", "complexity": "O(N) where N is the number of entries in the group's pending entries list (PEL).", @@ -14085,11 +15089,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "groupname", - "type": "string" + "name": "group", + "type": "string", + "display_text": "group" } ], "command_flags": [ @@ -14097,7 +15103,7 @@ ] }, "XGROUP HELP": { - "summary": "Show helpful text about the different 
subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14112,7 +15118,7 @@ ] }, "XGROUP SETID": { - "summary": "Set a consumer group to an arbitrary last delivered ID value.", + "summary": "Sets the last-delivered ID of a consumer group.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14152,30 +15158,35 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "groupname", - "type": "string" + "name": "group", + "type": "string", + "display_text": "group" }, { - "name": "id", + "name": "id-selector", "type": "oneof", "arguments": [ { "name": "id", - "type": "string" + "type": "string", + "display_text": "id" }, { - "name": "new_id", + "name": "new-id", "type": "pure-token", + "display_text": "new-id", "token": "$" } ] }, { - "name": "entries_read", + "name": "entriesread", "type": "integer", + "display_text": "entries-read", "token": "ENTRIESREAD", "optional": true } @@ -14185,7 +15196,7 @@ ] }, "XINFO": { - "summary": "A container for stream introspection commands", + "summary": "A container for stream introspection commands.", "since": "5.0.0", "group": "stream", "complexity": "Depends on subcommand.", @@ -14195,10 +15206,16 @@ "arity": -2 }, "XINFO CONSUMERS": { - "summary": "List the consumers in a consumer group", + "summary": "Returns a list of the consumers in a consumer group.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", + "history": [ + [ + "7.2.0", + "Added the `inactive` field." 
+ ] + ], "acl_categories": [ "@read", "@stream", @@ -14229,11 +15246,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "groupname", - "type": "string" + "name": "group", + "type": "string", + "display_text": "group" } ], "command_flags": [ @@ -14244,7 +15263,7 @@ ] }, "XINFO GROUPS": { - "summary": "List the consumer groups of a stream", + "summary": "Returns a list of the consumer groups of a stream.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14284,6 +15303,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -14292,7 +15312,7 @@ ] }, "XINFO HELP": { - "summary": "Show helpful text about the different subcommands", + "summary": "Returns helpful text about the different subcommands.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14307,7 +15327,7 @@ ] }, "XINFO STREAM": { - "summary": "Get information about a stream", + "summary": "Returns information about a stream.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14319,6 +15339,10 @@ [ "7.0.0", "Added the `max-deleted-entry-id`, `entries-added`, `recorded-first-entry-id`, `entries-read` and `lag` fields" + ], + [ + "7.2.0", + "Added the `active-time` field, and changed the meaning of `seen-time`." 
] ], "acl_categories": [ @@ -14351,17 +15375,24 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { - "name": "full", + "name": "full-block", "type": "block", - "token": "FULL", "optional": true, "arguments": [ + { + "name": "full", + "type": "pure-token", + "display_text": "full", + "token": "FULL" + }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -14373,7 +15404,7 @@ ] }, "XLEN": { - "summary": "Return the number of entries in a stream", + "summary": "Return the number of messages in a stream.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14406,6 +15437,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -14415,7 +15447,7 @@ ] }, "XPENDING": { - "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", + "summary": "Returns the information and entries from a stream consumer group's pending entries list.", "since": "5.0.0", "group": "stream", "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. 
When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer.", @@ -14455,11 +15487,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "group", - "type": "string" + "type": "string", + "display_text": "group" }, { "name": "filters", @@ -14469,25 +15503,30 @@ { "name": "min-idle-time", "type": "integer", + "display_text": "min-idle-time", "token": "IDLE", "since": "6.2.0", "optional": true }, { "name": "start", - "type": "string" + "type": "string", + "display_text": "start" }, { "name": "end", - "type": "string" + "type": "string", + "display_text": "end" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" }, { "name": "consumer", "type": "string", + "display_text": "consumer", "optional": true } ] @@ -14501,7 +15540,7 @@ ] }, "XRANGE": { - "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval", + "summary": "Returns the messages from a stream within a range of IDs.", "since": "5.0.0", "group": "stream", "complexity": "O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).", @@ -14541,19 +15580,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "string" + "type": "string", + "display_text": "start" }, { "name": "end", - "type": "string" + "type": "string", + "display_text": "end" }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -14563,10 +15606,9 @@ ] }, "XREAD": { - "summary": "Return never seen elements in multiple streams, with IDs greater than the ones reported by the caller for each stream. Can block.", + "summary": "Returns messages from multiple streams with IDs greater than the ones requested. 
Blocks until a message is available otherwise.", "since": "5.0.0", "group": "stream", - "complexity": "For each stream mentioned: O(N) with N being the number of elements being returned, it means that XREAD-ing with a fixed COUNT is O(1). Note that when the BLOCK option is used, XADD will pay O(M) time in order to serve the M clients blocked on the stream getting new data.", "acl_categories": [ "@read", "@stream", @@ -14599,12 +15641,14 @@ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true }, { "name": "milliseconds", "type": "integer", + "display_text": "milliseconds", "token": "BLOCK", "optional": true }, @@ -14616,12 +15660,14 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "id", "type": "string", + "display_text": "id", "multiple": true } ] @@ -14634,7 +15680,7 @@ ] }, "XREADGROUP": { - "summary": "Return new entries from a stream using a consumer group, or access the history of the pending entries for a given consumer. Can block.", + "summary": "Returns new or historical messages from a stream for a consumer in a group. Blocks until a message is available otherwise.", "since": "5.0.0", "group": "stream", "complexity": "For each stream mentioned: O(M) with M being the number of elements returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1). 
On the other side when XREADGROUP blocks, XADD will pay the O(N) time in order to serve the N clients blocked on the stream getting new data.", @@ -14668,35 +15714,40 @@ ], "arguments": [ { - "name": "group_consumer", + "name": "group-block", "type": "block", "token": "GROUP", "arguments": [ { "name": "group", - "type": "string" + "type": "string", + "display_text": "group" }, { "name": "consumer", - "type": "string" + "type": "string", + "display_text": "consumer" } ] }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true }, { "name": "milliseconds", "type": "integer", + "display_text": "milliseconds", "token": "BLOCK", "optional": true }, { "name": "noack", "type": "pure-token", + "display_text": "noack", "token": "NOACK", "optional": true }, @@ -14708,12 +15759,14 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "id", "type": "string", + "display_text": "id", "multiple": true } ] @@ -14726,7 +15779,7 @@ ] }, "XREVRANGE": { - "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval, in reverse order (from greater to smaller IDs) compared to XRANGE", + "summary": "Returns the messages from a stream within a range of IDs in reverse order.", "since": "5.0.0", "group": "stream", "complexity": "O(N) with N being the number of elements returned. If N is constant (e.g. 
always asking for the first 10 elements with COUNT), you can consider it O(1).", @@ -14766,19 +15819,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "end", - "type": "string" + "type": "string", + "display_text": "end" }, { "name": "start", - "type": "string" + "type": "string", + "display_text": "start" }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -14788,7 +15845,7 @@ ] }, "XSETID": { - "summary": "An internal command for replicating stream values", + "summary": "An internal command for replicating stream values.", "since": "5.0.0", "group": "stream", "complexity": "O(1)", @@ -14828,22 +15885,28 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "last-id", - "type": "string" + "type": "string", + "display_text": "last-id" }, { - "name": "entries_added", + "name": "entries-added", "type": "integer", + "display_text": "entries-added", "token": "ENTRIESADDED", + "since": "7.0.0", "optional": true }, { - "name": "max_deleted_entry_id", + "name": "max-deleted-id", "type": "string", + "display_text": "max-deleted-id", "token": "MAXDELETEDID", + "since": "7.0.0", "optional": true } ], @@ -14854,7 +15917,7 @@ ] }, "XTRIM": { - "summary": "Trims the stream to (approximately if '~' is passed) a certain size", + "summary": "Deletes messages from the beginning of a stream.", "since": "5.0.0", "group": "stream", "complexity": "O(N), with N being the number of evicted entries. 
Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.", @@ -14894,6 +15957,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -14907,11 +15971,13 @@ { "name": "maxlen", "type": "pure-token", + "display_text": "maxlen", "token": "MAXLEN" }, { "name": "minid", "type": "pure-token", + "display_text": "minid", "token": "MINID", "since": "6.2.0" } @@ -14925,22 +15991,26 @@ { "name": "equal", "type": "pure-token", + "display_text": "equal", "token": "=" }, { "name": "approximately", "type": "pure-token", + "display_text": "approximately", "token": "~" } ] }, { "name": "threshold", - "type": "string" + "type": "string", + "display_text": "threshold" }, { "name": "count", "type": "integer", + "display_text": "count", "token": "LIMIT", "since": "6.2.0", "optional": true @@ -14956,7 +16026,7 @@ ] }, "ZADD": { - "summary": "Add one or more members to a sorted set, or update its score if it already exists", + "summary": "Adds one or more members to a sorted set, or updates their scores. 
Creates the key if it doesn't exist.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", @@ -15004,6 +16074,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -15015,11 +16086,13 @@ { "name": "nx", "type": "pure-token", + "display_text": "nx", "token": "NX" }, { "name": "xx", "type": "pure-token", + "display_text": "xx", "token": "XX" } ] @@ -15033,11 +16106,13 @@ { "name": "gt", "type": "pure-token", + "display_text": "gt", "token": "GT" }, { "name": "lt", "type": "pure-token", + "display_text": "lt", "token": "LT" } ] @@ -15045,6 +16120,7 @@ { "name": "change", "type": "pure-token", + "display_text": "change", "token": "CH", "since": "3.0.2", "optional": true @@ -15052,22 +16128,25 @@ { "name": "increment", "type": "pure-token", + "display_text": "increment", "token": "INCR", "since": "3.0.2", "optional": true }, { - "name": "score_member", + "name": "data", "type": "block", "multiple": true, "arguments": [ { "name": "score", - "type": "double" + "type": "double", + "display_text": "score" }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" } ] } @@ -15079,7 +16158,7 @@ ] }, "ZCARD": { - "summary": "Get the number of members in a sorted set", + "summary": "Returns the number of members in a sorted set.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(1)", @@ -15112,6 +16191,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 } ], @@ -15121,7 +16201,7 @@ ] }, "ZCOUNT": { - "summary": "Count the members in a sorted set with scores within the given values", + "summary": "Returns the count of members in a sorted set that have scores within a range.", "since": "2.0.0", "group": "sorted-set", "complexity": "O(log(N)) with N being the number of elements in the sorted set.", @@ -15155,15 +16235,18 @@ { "name": "key", "type": "key", + "display_text": "key", 
"key_spec_index": 0 }, { "name": "min", - "type": "double" + "type": "double", + "display_text": "min" }, { "name": "max", - "type": "double" + "type": "double", + "display_text": "max" } ], "command_flags": [ @@ -15172,7 +16255,7 @@ ] }, "ZDIFF": { - "summary": "Subtract multiple sorted sets", + "summary": "Returns the difference between multiple sorted sets.", "since": "6.2.0", "group": "sorted-set", "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", @@ -15205,17 +16288,20 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true } @@ -15226,7 +16312,7 @@ ] }, "ZDIFFSTORE": { - "summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key", + "summary": "Stores the difference of multiple sorted sets in a key.", "since": "6.2.0", "group": "sorted-set", "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", @@ -15278,15 +16364,18 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true } @@ -15298,7 +16387,7 @@ ] }, "ZINCRBY": { - "summary": "Increment the score of a member in a sorted set", + "summary": "Increments the score of a member in a sorted set.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(log(N)) where N is the number of elements in the sorted set.", @@ -15333,15 +16422,18 @@ { "name": 
"key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "increment", - "type": "integer" + "type": "integer", + "display_text": "increment" }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" } ], "command_flags": [ @@ -15351,7 +16443,7 @@ ] }, "ZINTER": { - "summary": "Intersect multiple sorted sets", + "summary": "Returns the intersect of multiple sorted sets.", "since": "6.2.0", "group": "sorted-set", "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", @@ -15384,17 +16476,20 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "weight", "type": "integer", + "display_text": "weight", "token": "WEIGHTS", "optional": true, "multiple": true @@ -15408,16 +16503,19 @@ { "name": "sum", "type": "pure-token", + "display_text": "sum", "token": "SUM" }, { "name": "min", "type": "pure-token", + "display_text": "min", "token": "MIN" }, { "name": "max", "type": "pure-token", + "display_text": "max", "token": "MAX" } ] @@ -15425,6 +16523,7 @@ { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true } @@ -15435,7 +16534,7 @@ ] }, "ZINTERCARD": { - "summary": "Intersect multiple sorted sets and return the cardinality of the result", + "summary": "Returns the number of members of the intersect of multiple sorted sets.", "since": "7.0.0", "group": "sorted-set", "complexity": "O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets.", @@ -15468,17 +16567,20 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + 
"display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "limit", "type": "integer", + "display_text": "limit", "token": "LIMIT", "optional": true } @@ -15489,7 +16591,7 @@ ] }, "ZINTERSTORE": { - "summary": "Intersect multiple sorted sets and store the resulting sorted set in a new key", + "summary": "Stores the intersect of multiple sorted sets in a key.", "since": "2.0.0", "group": "sorted-set", "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", @@ -15541,21 +16643,25 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true }, { "name": "weight", "type": "integer", + "display_text": "weight", "token": "WEIGHTS", "optional": true, "multiple": true @@ -15569,16 +16675,19 @@ { "name": "sum", "type": "pure-token", + "display_text": "sum", "token": "SUM" }, { "name": "min", "type": "pure-token", + "display_text": "min", "token": "MIN" }, { "name": "max", "type": "pure-token", + "display_text": "max", "token": "MAX" } ] @@ -15591,7 +16700,7 @@ ] }, "ZLEXCOUNT": { - "summary": "Count the number of members in a sorted set between a given lexicographical range", + "summary": "Returns the number of members in a sorted set within a lexicographical range.", "since": "2.8.9", "group": "sorted-set", "complexity": "O(log(N)) with N being the number of elements in the sorted set.", @@ -15625,15 +16734,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "min", - "type": "string" + "type": "string", + "display_text": "min" }, { "name": "max", - "type": "string" + "type": "string", + "display_text": "max" } ], "command_flags": [ @@ -15642,10 +16754,10 @@ 
] }, "ZMPOP": { - "summary": "Remove and return members with scores in a sorted set", + "summary": "Returns the highest- or lowest-scoring members from one or more sorted sets after removing them. Deletes the sorted set if the last member was popped.", "since": "7.0.0", "group": "sorted-set", - "complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", + "complexity": "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", "acl_categories": [ "@write", "@sortedset", @@ -15676,11 +16788,13 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, @@ -15691,11 +16805,13 @@ { "name": "min", "type": "pure-token", + "display_text": "min", "token": "MIN" }, { "name": "max", "type": "pure-token", + "display_text": "max", "token": "MAX" } ] @@ -15703,6 +16819,7 @@ { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -15713,7 +16830,7 @@ ] }, "ZMSCORE": { - "summary": "Get the score associated with the given members in a sorted set", + "summary": "Returns the score of one or more members in a sorted set.", "since": "6.2.0", "group": "sorted-set", "complexity": "O(N) where N is the number of members being requested.", @@ -15747,11 +16864,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", "multiple": true } ], @@ -15761,7 +16880,7 @@ ] }, "ZPOPMAX": { - "summary": "Remove and return members with the highest scores in a sorted set", + "summary": "Returns the highest-scoring members from a sorted set after removing them. 
Deletes the sorted set if the last member was popped.", "since": "5.0.0", "group": "sorted-set", "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", @@ -15796,11 +16915,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "optional": true } ], @@ -15810,7 +16931,7 @@ ] }, "ZPOPMIN": { - "summary": "Remove and return members with the lowest scores in a sorted set", + "summary": "Returns the lowest-scoring members from a sorted set after removing them. Deletes the sorted set if the last member was popped.", "since": "5.0.0", "group": "sorted-set", "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", @@ -15845,11 +16966,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "count", "type": "integer", + "display_text": "count", "optional": true } ], @@ -15859,10 +16982,10 @@ ] }, "ZRANDMEMBER": { - "summary": "Get one or multiple random elements from a sorted set", + "summary": "Returns one or more random members from a sorted set.", "since": "6.2.0", "group": "sorted-set", - "complexity": "O(N) where N is the number of elements returned", + "complexity": "O(N) where N is the number of members returned", "acl_categories": [ "@read", "@sortedset", @@ -15893,6 +17016,7 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { @@ -15902,11 +17026,13 @@ "arguments": [ { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" }, { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true } @@ -15921,7 +17047,7 @@ ] }, "ZRANGE": { - "summary": "Return a range of members in a sorted set", + "summary": "Returns members in a sorted set within a range of indexes.", "since": "1.2.0", 
"group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", @@ -15961,15 +17087,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "string" + "type": "string", + "display_text": "start" }, { "name": "stop", - "type": "string" + "type": "string", + "display_text": "stop" }, { "name": "sortby", @@ -15980,11 +17109,13 @@ { "name": "byscore", "type": "pure-token", + "display_text": "byscore", "token": "BYSCORE" }, { "name": "bylex", "type": "pure-token", + "display_text": "bylex", "token": "BYLEX" } ] @@ -15992,12 +17123,13 @@ { "name": "rev", "type": "pure-token", + "display_text": "rev", "token": "REV", "since": "6.2.0", "optional": true }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "since": "6.2.0", @@ -16005,17 +17137,20 @@ "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] }, { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true } @@ -16025,7 +17160,7 @@ ] }, "ZRANGEBYLEX": { - "summary": "Return a range of members in a sorted set, by lexicographical range", + "summary": "Returns members in a sorted set within a lexicographical range.", "since": "2.8.9", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", @@ -16061,29 +17196,34 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "min", - "type": "string" + "type": "string", + "display_text": "min" }, { "name": "max", - "type": "string" + "type": "string", + "display_text": "max" }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] } @@ -16096,7 +17236,7 @@ ] }, "ZRANGEBYSCORE": { - "summary": "Return a range of members in a sorted set, by score", + "summary": "Returns members in a sorted set within a range of scores.", "since": "1.0.5", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", @@ -16138,36 +17278,42 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "min", - "type": "double" + "type": "double", + "display_text": "min" }, { "name": "max", - "type": "double" + "type": "double", + "display_text": "max" }, { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "since": "2.0.0", "optional": true }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] } @@ -16180,7 +17326,7 @@ ] }, "ZRANGESTORE": { - "summary": "Store a range of members from sorted set into another key", + "summary": "Stores a range of members from sorted set in a key.", "since": "6.2.0", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key.", @@ -16232,20 +17378,24 @@ { "name": "dst", "type": "key", + "display_text": "dst", "key_spec_index": 0 }, { "name": "src", "type": "key", + "display_text": "src", "key_spec_index": 1 }, { "name": "min", - "type": "string" + "type": "string", + "display_text": "min" }, { "name": "max", - "type": "string" + "type": "string", + "display_text": "max" }, { "name": "sortby", @@ -16255,11 +17405,13 @@ { "name": "byscore", "type": "pure-token", + "display_text": "byscore", "token": "BYSCORE" }, { "name": "bylex", "type": "pure-token", + "display_text": "bylex", "token": "BYLEX" } ] @@ -16267,22 +17419,25 @@ { "name": "rev", "type": "pure-token", + "display_text": "rev", "token": "REV", "optional": true }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { 
"name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] } @@ -16293,16 +17448,22 @@ ] }, "ZRANK": { - "summary": "Determine the index of a member in a sorted set", + "summary": "Returns the index of a member in a sorted set ordered by ascending scores.", "since": "2.0.0", "group": "sorted-set", "complexity": "O(log(N))", + "history": [ + [ + "7.2.0", + "Added the optional `WITHSCORE` argument." + ] + ], "acl_categories": [ "@read", "@sortedset", "@fast" ], - "arity": 3, + "arity": -3, "key_specs": [ { "begin_search": { @@ -16327,11 +17488,20 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" + }, + { + "name": "withscore", + "type": "pure-token", + "display_text": "withscore", + "token": "WITHSCORE", + "optional": true } ], "command_flags": [ @@ -16340,7 +17510,7 @@ ] }, "ZREM": { - "summary": "Remove one or more members from a sorted set", + "summary": "Removes one or more members from a sorted set. Deletes the sorted set if all members were removed.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(M*log(N)) with N being the number of elements in the sorted set and M the number of elements to be removed.", @@ -16380,11 +17550,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", "type": "string", + "display_text": "member", "multiple": true } ], @@ -16394,7 +17566,7 @@ ] }, "ZREMRANGEBYLEX": { - "summary": "Remove all members in a sorted set between the given lexicographical range", + "summary": "Removes members in a sorted set within a lexicographical range. 
Deletes the sorted set if all members were removed.", "since": "2.8.9", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", @@ -16428,15 +17600,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "min", - "type": "string" + "type": "string", + "display_text": "min" }, { "name": "max", - "type": "string" + "type": "string", + "display_text": "max" } ], "command_flags": [ @@ -16444,7 +17619,7 @@ ] }, "ZREMRANGEBYRANK": { - "summary": "Remove all members in a sorted set within the given indexes", + "summary": "Removes members in a sorted set within a range of indexes. Deletes the sorted set if all members were removed.", "since": "2.0.0", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", @@ -16478,15 +17653,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "stop", - "type": "integer" + "type": "integer", + "display_text": "stop" } ], "command_flags": [ @@ -16494,7 +17672,7 @@ ] }, "ZREMRANGEBYSCORE": { - "summary": "Remove all members in a sorted set within the given scores", + "summary": "Removes members in a sorted set within a range of scores. 
Deletes the sorted set if all members were removed.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", @@ -16528,15 +17706,18 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "min", - "type": "double" + "type": "double", + "display_text": "min" }, { "name": "max", - "type": "double" + "type": "double", + "display_text": "max" } ], "command_flags": [ @@ -16544,7 +17725,7 @@ ] }, "ZREVRANGE": { - "summary": "Return a range of members in a sorted set, by index, with scores ordered from high to low", + "summary": "Returns members in a sorted set within a range of indexes in reverse order.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", @@ -16580,19 +17761,23 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "start", - "type": "integer" + "type": "integer", + "display_text": "start" }, { "name": "stop", - "type": "integer" + "type": "integer", + "display_text": "stop" }, { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true } @@ -16605,7 +17790,7 @@ ] }, "ZREVRANGEBYLEX": { - "summary": "Return a range of members in a sorted set, by lexicographical range, ordered from higher to lower strings.", + "summary": "Returns members in a sorted set within a lexicographical range in reverse order.", "since": "2.8.9", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", @@ -16641,29 +17826,34 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "max", - "type": "string" + "type": "string", + "display_text": "max" }, { "name": "min", - "type": "string" + "type": "string", + "display_text": "min" }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] } @@ -16676,7 +17866,7 @@ ] }, "ZREVRANGEBYSCORE": { - "summary": "Return a range of members in a sorted set, by score, with scores ordered from high to low", + "summary": "Returns members in a sorted set within a range of scores in reverse order.", "since": "2.2.0", "group": "sorted-set", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", @@ -16718,35 +17908,41 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "max", - "type": "double" + "type": "double", + "display_text": "max" }, { "name": "min", - "type": "double" + "type": "double", + "display_text": "min" }, { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true }, { - "name": "offset_count", + "name": "limit", "type": "block", "token": "LIMIT", "optional": true, "arguments": [ { "name": "offset", - "type": "integer" + "type": "integer", + "display_text": "offset" }, { "name": "count", - "type": "integer" + "type": "integer", + "display_text": "count" } ] } @@ -16759,16 +17955,22 @@ ] }, "ZREVRANK": { - "summary": "Determine the index of a member in a sorted set, with scores ordered from high to low", + "summary": "Returns the index of a member in a sorted set ordered by descending scores.", "since": "2.0.0", "group": "sorted-set", "complexity": "O(log(N))", + "history": [ + [ + "7.2.0", + "Added the optional `WITHSCORE` argument." + ] + ], "acl_categories": [ "@read", "@sortedset", "@fast" ], - "arity": 3, + "arity": -3, "key_specs": [ { "begin_search": { @@ -16793,11 +17995,20 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" + }, + { + "name": "withscore", + "type": "pure-token", + "display_text": "withscore", + "token": "WITHSCORE", + "optional": true } ], "command_flags": [ @@ -16806,10 +18017,10 @@ ] }, "ZSCAN": { - "summary": "Incrementally iterate sorted sets elements and associated scores", + "summary": "Iterates over members and scores of a sorted set.", "since": "2.8.0", "group": "sorted-set", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection..", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.", "acl_categories": [ "@read", "@sortedset", @@ -16840,21 +18051,25 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "cursor", - "type": "integer" + "type": "integer", + "display_text": "cursor" }, { "name": "pattern", "type": "pattern", + "display_text": "pattern", "token": "MATCH", "optional": true }, { "name": "count", "type": "integer", + "display_text": "count", "token": "COUNT", "optional": true } @@ -16867,7 +18082,7 @@ ] }, "ZSCORE": { - "summary": "Get the score associated with the given member in a sorted set", + "summary": "Returns the score of a member in a sorted set.", "since": "1.2.0", "group": "sorted-set", "complexity": "O(1)", @@ -16901,11 +18116,13 @@ { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0 }, { "name": "member", - "type": "string" + "type": "string", + "display_text": "member" } ], "command_flags": [ @@ -16914,7 +18131,7 @@ ] }, "ZUNION": { - "summary": "Add multiple sorted sets", + "summary": "Returns the union of multiple sorted sets.", "since": "6.2.0", "group": "sorted-set", "complexity": "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", @@ -16947,17 +18164,20 @@ "arguments": [ { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 0, "multiple": true }, { "name": "weight", "type": "integer", + "display_text": "weight", "token": "WEIGHTS", "optional": true, "multiple": true @@ -16971,16 +18191,19 @@ { "name": "sum", "type": "pure-token", + "display_text": "sum", "token": "SUM" }, { "name": "min", "type": "pure-token", + 
"display_text": "min", "token": "MIN" }, { "name": "max", "type": "pure-token", + "display_text": "max", "token": "MAX" } ] @@ -16988,6 +18211,7 @@ { "name": "withscores", "type": "pure-token", + "display_text": "withscores", "token": "WITHSCORES", "optional": true } @@ -16998,7 +18222,7 @@ ] }, "ZUNIONSTORE": { - "summary": "Add multiple sorted sets and store the resulting sorted set in a new key", + "summary": "Stores the union of multiple sorted sets in a key.", "since": "2.0.0", "group": "sorted-set", "complexity": "O(N)+O(M log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", @@ -17050,21 +18274,25 @@ { "name": "destination", "type": "key", + "display_text": "destination", "key_spec_index": 0 }, { "name": "numkeys", - "type": "integer" + "type": "integer", + "display_text": "numkeys" }, { "name": "key", "type": "key", + "display_text": "key", "key_spec_index": 1, "multiple": true }, { "name": "weight", "type": "integer", + "display_text": "weight", "token": "WEIGHTS", "optional": true, "multiple": true @@ -17078,16 +18306,19 @@ { "name": "sum", "type": "pure-token", + "display_text": "sum", "token": "SUM" }, { "name": "min", "type": "pure-token", + "display_text": "min", "token": "MIN" }, { "name": "max", "type": "pure-token", + "display_text": "max", "token": "MAX" } ] diff --git a/commands/acl-cat.md b/commands/acl-cat.md index 0eb256fab8..97e35d16fa 100644 --- a/commands/acl-cat.md +++ b/commands/acl-cat.md @@ -76,7 +76,3 @@ Then we may want to know what commands are part of a given category: 30) "psync" 31) "sort" ``` - -@return - -@array-reply: a list of ACL categories or a list of commands inside a given category. The command may return an error if an invalid category name is given as argument. 
diff --git a/commands/acl-deluser.md b/commands/acl-deluser.md index e3f443e4d7..620183db8f 100644 --- a/commands/acl-deluser.md +++ b/commands/acl-deluser.md @@ -4,10 +4,6 @@ removed from the system, this is the default user that every new connection is authenticated with. The list of users may include usernames that do not exist, in such case no operation is performed for the non existing users. -@return - -@integer-reply: The number of users that were deleted. This number will not always match the number of arguments since certain users may not exist. - @examples ``` diff --git a/commands/acl-dryrun.md b/commands/acl-dryrun.md index 4afb3cd1de..78cca1e76d 100644 --- a/commands/acl-dryrun.md +++ b/commands/acl-dryrun.md @@ -1,11 +1,6 @@ Simulate the execution of a given command by a given user. This command can be used to test the permissions of a given user without having to enable the user or cause the side effects of running the command. -@return - -@simple-string-reply: `OK` on success. -@bulk-string-reply: An error describing why the user can't execute the command. - @examples ``` @@ -13,6 +8,6 @@ This command can be used to test the permissions of a given user without having "OK" > ACL DRYRUN VIRGINIA SET foo bar "OK" -> ACL DRYRUN VIRGINIA GET foo bar -"This user has no permissions to run the 'GET' command" +> ACL DRYRUN VIRGINIA GET foo +"User VIRGINIA has no permissions to run the 'get' command" ``` diff --git a/commands/acl-genpass.md b/commands/acl-genpass.md index 2afbaecf5e..0b7a274334 100644 --- a/commands/acl-genpass.md +++ b/commands/acl-genpass.md @@ -25,10 +25,6 @@ rounded to the next multiple of 4. So for instance asking for just 1 bit password will result in 4 bits to be emitted, in the form of a single hex character. -@return - -@bulk-string-reply: by default 64 bytes string representing 256 bits of pseudorandom data. 
Otherwise if an argument if needed, the output string length is the number of specified bits (rounded to the next multiple of 4) divided by 4. - @examples ``` diff --git a/commands/acl-getuser.md b/commands/acl-getuser.md index 6c2eeedb07..c952e6483a 100644 --- a/commands/acl-getuser.md +++ b/commands/acl-getuser.md @@ -9,10 +9,6 @@ Note: This description of command rules reflects the user's effective permission Selectors are listed in the order they were applied to the user, and include information about commands, key patterns, and channel patterns. -@array-reply: a list of ACL rule definitions for the user. - -If `user` does not exist a @nil-reply is returned. - @examples Here's an example configuration for a user diff --git a/commands/acl-help.md b/commands/acl-help.md index ddb9432f3c..bccd37b243 100644 --- a/commands/acl-help.md +++ b/commands/acl-help.md @@ -1,5 +1,2 @@ The `ACL HELP` command returns a helpful text describing the different subcommands. -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/acl-list.md b/commands/acl-list.md index e21e710437..df7cecde26 100644 --- a/commands/acl-list.md +++ b/commands/acl-list.md @@ -4,10 +4,6 @@ same used in the redis.conf file or the external ACL file, so you can cut and paste what is returned by the ACL LIST command directly inside a configuration file if you wish (but make sure to check `ACL SAVE`). -@return - -An array of strings. - @examples ``` diff --git a/commands/acl-load.md b/commands/acl-load.md index 521c1a6594..f425937ef5 100644 --- a/commands/acl-load.md +++ b/commands/acl-load.md @@ -6,12 +6,6 @@ sure to have an *all or nothing* behavior, that is: * If every line in the file is valid, all the ACLs are loaded. * If one or more line in the file is not valid, nothing is loaded, and the old ACL rules defined in the server memory continue to be used. -@return - -@simple-string-reply: `OK` on success. 
- -The command may fail with an error for several reasons: if the file is not readable, if there is an error inside the file, and in such case the error will be reported to the user in the error. Finally the command will fail if the server is not configured to use an external ACL file. - @examples ``` diff --git a/commands/acl-log.md b/commands/acl-log.md index adeaf8d540..eb102f66a6 100644 --- a/commands/acl-log.md +++ b/commands/acl-log.md @@ -8,16 +8,6 @@ The optional argument specifies how many entries to show. By default up to ten failures are returned. The special `RESET` argument clears the log. Entries are displayed starting from the most recent. -@return - -When called to show security events: - -@array-reply: a list of ACL security events. - -When called with `RESET`: - -@simple-string-reply: `OK` if the security log was cleared. - @examples ``` @@ -35,7 +25,26 @@ When called with `RESET`: 9) "username" 10) "someuser" 11) "age-seconds" - 12) "4.0960000000000001" + 12) "8.038" 13) "client-info" - 14) "id=6 addr=127.0.0.1:63026 fd=8 name= age=9 idle=0 flags=N db=0 sub=0 psub=0 multi=-1 qbuf=48 qbuf-free=32720 obl=0 oll=0 omem=0 events=r cmd=auth user=default" + 14) "id=3 addr=127.0.0.1:57275 laddr=127.0.0.1:6379 fd=8 name= age=16 idle=0 flags=N db=0 sub=0 psub=0 ssub=0 multi=-1 qbuf=48 qbuf-free=16842 argv-mem=25 multi-mem=0 rbs=1024 rbp=0 obl=0 oll=0 omem=0 tot-mem=18737 events=r cmd=auth user=default redir=-1 resp=2" + 15) "entry-id" + 16) (integer) 0 + 17) "timestamp-created" + 18) (integer) 1675361492408 + 19) "timestamp-last-updated" + 20) (integer) 1675361492408 ``` + +Each log entry is composed of the following fields: + +1. `count`: The number of security events detected within a 60 second period that are represented by this entry. +2. `reason`: The reason that the security events were logged. Either `command`, `key`, `channel`, or `auth`. +3. `context`: The context that the security events were detected in. 
Either `toplevel`, `multi`, `lua`, or `module`. +4. `object`: The resource that the user had insufficient permissions to access. `auth` when the reason is `auth`. +5. `username`: The username that executed the command that caused the security events or the username that had a failed authentication attempt. +6. `age-seconds`: Age of the log entry in seconds. +7. `client-info`: Displays the client info of a client which caused one of the security events. +8. `entry-id`: The sequence number of the entry (starting at 0) since the server process started. Can also be used to check if items were “lost”, if they fell between periods. +9. `timestamp-created`: A UNIX timestamp in `milliseconds` at the time the entry was first created. +10. `timestamp-last-updated`: A UNIX timestamp in `milliseconds` at the time the entry was last updated. \ No newline at end of file diff --git a/commands/acl-save.md b/commands/acl-save.md index 57badc8b78..bfa59a5969 100644 --- a/commands/acl-save.md +++ b/commands/acl-save.md @@ -1,12 +1,6 @@ When Redis is configured to use an ACL file (with the `aclfile` configuration option), this command will save the currently defined ACLs from the server memory to the ACL file. -@return - -@simple-string-reply: `OK` on success. - -The command may fail with an error for several reasons: if the file cannot be written or if the server is not configured to use an external ACL file. - @examples ``` diff --git a/commands/acl-setuser.md b/commands/acl-setuser.md index a6ae4a8e03..3bc31cc0ee 100644 --- a/commands/acl-setuser.md +++ b/commands/acl-setuser.md @@ -1,32 +1,31 @@ Create an ACL user with the specified rules or modify the rules of an -existing user. This is the main interface in order to manipulate Redis ACL -users interactively: if the username does not exist, the command creates -the username without any privilege, then reads from left to right all the -rules provided as successive arguments, setting the user ACL rules as specified. +existing user. 
+Manipulate Redis ACL users interactively. +If the username does not exist, the command creates the username without any privilege. +It then reads from left to right all the [rules](#acl-rules) provided as successive arguments, setting the user ACL rules as specified. If the user already exists, the provided ACL rules are simply applied *in addition* to the rules already set. For example: ACL SETUSER virginia on allkeys +set -The above command will create a user called `virginia` that is active -(the on rule), can access any key (allkeys rule), and can call the -set command (+set rule). Then another SETUSER call can modify the user rules: +The above command creates a user called `virginia` who is active(the _on_ rule), can access any key (_allkeys_ rule), and can call the set command (_+set_ rule). +Then, you can use another `ACL SETUSER` call to modify the user rules: ACL SETUSER virginia +get -The above rule will not apply the new rule to the user virginia, so other than `SET`, the user virginia will now be able to also use the `GET` command. +The above rule applies the new rule to the user `virginia`, so other than `SET`, the user `virginia` can now also use the `GET` command. -Starting from Redis 7.0, ACL rules can also be grouped into multiple distinct sets of rules, called selectors. +Starting from Redis 7.0, ACL rules can also be grouped into multiple distinct sets of rules, called _selectors_. Selectors are added by wrapping the rules in parentheses and providing them just like any other rule. In order to execute a command, either the root permissions (rules defined outside of parenthesis) or any of the selectors (rules defined inside parenthesis) must match the given command. For example: ACL SETUSER virginia on +GET allkeys (+SET ~app1*) -This sets a user with two sets of permission, one defined on the user and one defined with a selector. -The root user permissions only allows executing the get command, but can be executed on any keys. 
-The selector then grants a secondary set of permissions: access to the `SET` command to be executed on any key that starts with "app1". +This sets a user with two sets of permissions, one defined on the user and one defined with a selector. +The root user permissions only allow executing the get command, but can be executed on any keys. +The selector then grants a secondary set of permissions: access to the `SET` command to be executed on any key that starts with `app1`. Using multiple selectors allows you to grant permissions that are different depending on what keys are being accessed. When we want to be sure to define a user from scratch, without caring if @@ -35,9 +34,7 @@ it had previously defined rules associated, we can use the special rule ACL SETUSER antirez reset [... other rules ...] -After resetting a user, it returns back to the status it has when it -was just created: non active (off rule), can't execute any command, can't -access any key: +After resetting a user, its ACL rules revert to the default: inactive, passwordless, can't execute any command nor access any key or channel: > ACL SETUSER antirez reset +OK @@ -50,48 +47,42 @@ another string (without any space in between), like "+SET". The following documentation is a reference manual about the capabilities of this command, however our [ACL tutorial](/topics/acl) may be a more gentle introduction to how the ACL system works in general. -## List of rules +## ACL rules -Redis ACL rules are split into two categories: rules that define command permissions, "Command rules", and rules that define user state, "User management rules". +Redis ACL rules are split into two categories: rules that define command permissions or _command rules_, and rules that define the user state or _user management rules_. 
This is a list of all the supported Redis ACL rules: ### Command rules -* `~`: add the specified key pattern (glob style pattern, like in the `KEYS` command), to the list of key patterns accessible by the user. This grants both read and write permissions to keys that match the pattern. You can add multiple key patterns to the same user. Example: `~objects:*` -* `%R~`: (Available in Redis 7.0 and later) Add the specified read key pattern. This behaves similar to the regular key pattern but only grants permission to read from keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. -* `%W~`: (Available in Redis 7.0 and later) Add the specified write key pattern. This behaves similar to the regular key pattern but only grants permission to write to keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. +* `~`: Adds the specified key pattern (glob style pattern, like in the `KEYS` command), to the list of key patterns accessible by the user. This grants both read and write permissions to keys that match the pattern. You can add multiple key patterns to the same user. Example: `~objects:*` +* `%R~`: (Available in Redis 7.0 and later) Adds the specified read key pattern. This behaves similar to the regular key pattern but only grants permission to read from keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. +* `%W~`: (Available in Redis 7.0 and later) Adds the specified write key pattern. This behaves similar to the regular key pattern but only grants permission to write to keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. * `%RW~`: (Available in Redis 7.0 and later) Alias for `~`. -* `allkeys`: alias for `~*`, it allows the user to access all the keys. -* `resetkeys`: removes all the key patterns from the list of key patterns the user can access. 
-* `&`: (Available in Redis 6.2 and later) add the specified glob style pattern to the list of Pub/Sub channel patterns accessible by the user. You can add multiple channel patterns to the same user. Example: `&chatroom:*` -* `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub channels. -* `resetchannels`: removes all channel patterns from the list of Pub/Sub channel patterns the user can access. -* `+`: Add the command to the list of commands the user can call. Can be used with `|` for allowing subcommands (e.g "+config|get"). -* `+@`: add all the commands in the specified category to the list of commands the user is able to execute. Example: `+@string` (adds all the string commands). For a list of categories check the `ACL CAT` command. -* `+|first-arg`: Allow a specific first argument of an otherwise disabled command. It is only supported on commands with no sub-commands, and is not allowed as negative form like -SELECT|1, only additive starting with "+". This feature is deprecated and may be removed in the future. -* `allcommands`: alias of `+@all`. Adds all the commands there are in the server, including *future commands* loaded via module, to be executed by this user. -* `-`: Remove the command to the list of commands the user can call. Starting Redis 7.0, it can be used with `|` for blocking subcommands (e.g "-config|set"). +* `allkeys`: Alias for `~*`, it allows the user to access all the keys. +* `resetkeys`: Removes all the key patterns from the list of key patterns the user can access. +* `&`: (Available in Redis 6.2 and later) Adds the specified glob style pattern to the list of Pub/Sub channel patterns accessible by the user. You can add multiple channel patterns to the same user. Example: `&chatroom:*` +* `allchannels`: Alias for `&*`, it allows the user to access all Pub/Sub channels. +* `resetchannels`: Removes all channel patterns from the list of Pub/Sub channel patterns the user can access. 
+* `+`: Adds the command to the list of commands the user can call. Can be used with `|` for allowing subcommands (e.g "+config|get"). +* `+@`: Adds all the commands in the specified category to the list of commands the user is able to execute. Example: `+@string` (adds all the string commands). For a list of categories, check the `ACL CAT` command. +* `+|first-arg`: Allows a specific first argument of an otherwise disabled command. It is only supported on commands with no sub-commands, and is not allowed as negative form like -SELECT|1, only additive starting with "+". This feature is deprecated and may be removed in the future. +* `allcommands`: Alias of `+@all`. Adds all the commands there are in the server, including *future commands* loaded via module, to be executed by this user. +* `-`: Remove the command to the list of commands the user can call. Starting Redis 7.0, it can be used with `|` for blocking subcommands (e.g., "-config|set"). * `-@`: Like `+@` but removes all the commands in the category instead of adding them. -* `nocommands`: alias for `-@all`. Removes all the commands, the user will no longer be able to execute anything. +* `nocommands`: Alias for `-@all`. Removes all the commands, and the user is no longer able to execute anything. ### User management rules -* `on`: set the user as active, it will be possible to authenticate as this user using `AUTH `. -* `off`: set user as not active, it will be impossible to log as this user. Please note that if a user gets disabled (set to off) after there are connections already authenticated with such a user, the connections will continue to work as expected. To also kill the old connections you can use `CLIENT KILL` with the user option. An alternative is to delete the user with `ACL DELUSER`, that will result in all the connections authenticated as the deleted user to be disconnected. -* `nopass`: the user is set as a "no password" user. 
It means that it will be possible to authenticate as such user with any password. By default, the `default` special user is set as "nopass". The `nopass` rule will also reset all the configured passwords for the user. -* `>password`: Add the specified clear text password as a hashed password in the list of the users passwords. Every user can have many active passwords, so that password rotation will be simpler. The specified password is not stored as clear text inside the server. Example: `>mypassword`. -* `#`: Add the specified hashed password to the list of user passwords. A Redis hashed password is hashed with SHA256 and translated into a hexadecimal string. Example: `#c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2`. +* `on`: Set the user as active, it will be possible to authenticate as this user using `AUTH <username> <password>`. +* `off`: Set user as not active, it will be impossible to log as this user. Please note that if a user gets disabled (set to off) after there are connections already authenticated with such a user, the connections will continue to work as expected. To also kill the old connections you can use `CLIENT KILL` with the user option. An alternative is to delete the user with `ACL DELUSER`, that will result in all the connections authenticated as the deleted user to be disconnected. +* `nopass`: The user is set as a _no password_ user. It means that it will be possible to authenticate as such user with any password. By default, the `default` special user is set as "nopass". The `nopass` rule will also reset all the configured passwords for the user. +* `>password`: Adds the specified clear text password as a hashed password in the list of the user's passwords. Every user can have many active passwords, so that password rotation will be simpler. The specified password is not stored as clear text inside the server. Example: `>mypassword`. +* `#`: Adds the specified hashed password to the list of user passwords.
A Redis hashed password is hashed with SHA256 and translated into a hexadecimal string. Example: `#c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2`. * `<password`: Like `>password` but removes the password instead of adding it. * `!`: Like `#` but removes the password instead of adding it. -* `()`: (Available in Redis 7.0 and later) Create a new selector to match rules against. Selectors are evaluated after the user permissions, and are evaluated according to the order they are defined. If a command matches either the user permissions or any selector, it is allowed. See [selectors](/topics/acl#selectors) for more information. -* `clearselectors`: (Available in Redis 7.0 and later) Delete all of the selectors attached to the user. -* `reset`: Remove any capability from the user. It is set to off, without passwords, unable to execute any command, unable to access any key. - -@return - -@simple-string-reply: `OK` on success. - -If the rules contain errors, the error is returned. +* `()`: (Available in Redis 7.0 and later) Creates a new selector to match rules against. Selectors are evaluated after the user permissions, and are evaluated according to the order they are defined. If a command matches either the user permissions or any selector, it is allowed. See [selectors](/docs/management/security/acl#selectors) for more information. +* `clearselectors`: (Available in Redis 7.0 and later) Deletes all of the selectors attached to the user. +* `reset`: Removes any capability from the user. They are set to off, without passwords, unable to execute any command, unable to access any key. @examples diff --git a/commands/acl-users.md b/commands/acl-users.md index 9b0fe1bf38..b8a40c4c16 100644 --- a/commands/acl-users.md +++ b/commands/acl-users.md @@ -1,10 +1,6 @@ The command shows a list of all the usernames of the currently configured users in the Redis ACL system. -@return - -An array of strings.
- @examples ``` diff --git a/commands/acl-whoami.md b/commands/acl-whoami.md index 5ec7b8485b..04a759477b 100644 --- a/commands/acl-whoami.md +++ b/commands/acl-whoami.md @@ -2,10 +2,6 @@ Return the username the current connection is authenticated with. New connections are authenticated with the "default" user. They can change user using `AUTH`. -@return - -@bulk-string-reply: the username of the current connection. - @examples ``` diff --git a/commands/acl.md b/commands/acl.md index eb9277cb40..7e60f2e6b0 100644 --- a/commands/acl.md +++ b/commands/acl.md @@ -1,3 +1,3 @@ -This is a container command for [Access Control List](/docs/manual/security/acl/) commands. +This is a container command for [Access Control List](/docs/management/security/acl/) commands. To see the list of available commands you can call `ACL HELP`. diff --git a/commands/append.md b/commands/append.md index 2c8bd7432f..2f10c8c1e8 100644 --- a/commands/append.md +++ b/commands/append.md @@ -3,10 +3,6 @@ end of the string. If `key` does not exist it is created and set as an empty string, so `APPEND` will be similar to `SET` in this special case. -@return - -@integer-reply: the length of the string after the append operation. - @examples ```cli diff --git a/commands/asking.md b/commands/asking.md index d98643c25c..39b0acb72b 100644 --- a/commands/asking.md +++ b/commands/asking.md @@ -4,7 +4,3 @@ This is normally done automatically by cluster clients. If an `-ASK` redirect is received during a transaction, only one ASKING command needs to be sent to the target node before sending the complete transaction to the target node. See [ASK redirection in the Redis Cluster Specification](/topics/cluster-spec#ask-redirection) for details. - -@return - -@simple-string-reply: `OK`. diff --git a/commands/auth.md b/commands/auth.md index 7c1e02a800..144aec9a21 100644 --- a/commands/auth.md +++ b/commands/auth.md @@ -1,7 +1,7 @@ The AUTH command authenticates the current connection in two cases: 1. 
If the Redis server is password protected via the `requirepass` option. -2. If a Redis 6.0 instance, or greater, is using the [Redis ACL system](/topics/acl). +2. A Redis 6.0 instance, or greater, is using the [Redis ACL system](/topics/acl). Redis versions prior of Redis 6 were only able to understand the one argument version of the command: @@ -30,7 +30,3 @@ Because of the high performance nature of Redis, it is possible to try a lot of passwords in parallel in very short time, so make sure to generate a strong and very long password so that this attack is infeasible. A good way to generate strong passwords is via the `ACL GENPASS` command. - -@return - -@simple-string-reply or an error if the password, or username/password pair, is invalid. diff --git a/commands/bgrewriteaof.md b/commands/bgrewriteaof.md index 85f52040b4..2424a8631f 100644 --- a/commands/bgrewriteaof.md +++ b/commands/bgrewriteaof.md @@ -23,8 +23,3 @@ Please refer to the [persistence documentation][tp] for detailed information. [tp]: /topics/persistence -@return - -@simple-string-reply: A simple string reply indicating that the rewriting started or is about to start ASAP, when the call is executed with success. - -The command may reply with an error in certain cases, as documented above. diff --git a/commands/bgsave.md b/commands/bgsave.md index 714d960716..f6f676326e 100644 --- a/commands/bgsave.md +++ b/commands/bgsave.md @@ -19,6 +19,3 @@ Please refer to the [persistence documentation][tp] for detailed information. [tp]: /topics/persistence -@return - -@simple-string-reply: `Background saving started` if `BGSAVE` started correctly or `Background saving scheduled` when used with the `SCHEDULE` subcommand. diff --git a/commands/bitcount.md b/commands/bitcount.md index 95bd3a3882..3b33703751 100644 --- a/commands/bitcount.md +++ b/commands/bitcount.md @@ -15,12 +15,6 @@ We can use an additional argument `BIT` to specify a bit index. So 0 is the first bit, 1 is the second bit, and so forth. 
For negative values, -1 is the last bit, -2 is the penultimate, and so forth. -@return - -@integer-reply - -The number of bits set to 1. - @examples ```cli diff --git a/commands/bitfield.md b/commands/bitfield.md index 6609c85b13..0a549acff9 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -78,12 +78,6 @@ By default, **WRAP** is used if not otherwise specified. 1) (integer) 0 2) (integer) 3 -## Return value - -The command returns an array with each entry being the corresponding result of -the sub command given at the same position. `OVERFLOW` subcommands don't count -as generating a reply. - The following is an example of `OVERFLOW FAIL` returning NULL. > BITFIELD mykey OVERFLOW FAIL incrby u2 102 1 diff --git a/commands/bitfield_ro.md b/commands/bitfield_ro.md index 94057a1183..26ec064ded 100644 --- a/commands/bitfield_ro.md +++ b/commands/bitfield_ro.md @@ -13,7 +13,3 @@ See original `BITFIELD` for more details. ``` BITFIELD_RO hello GET i8 16 ``` - -@return - -@array-reply: An array with each entry being the corresponding result of the subcommand given at the same position. diff --git a/commands/bitop.md b/commands/bitop.md index d35c756302..e679b449bf 100644 --- a/commands/bitop.md +++ b/commands/bitop.md @@ -24,13 +24,6 @@ zero-padded up to the length of the longest string. The same holds true for non-existent keys, that are considered as a stream of zero bytes up to the length of the longest string. -@return - -@integer-reply - -The size of the string stored in the destination key, that is equal to the -size of the longest input string. - @examples ```cli @@ -58,5 +51,5 @@ bitmaps][hbgc212fermurb]" for an interesting use cases. Care should be taken when running it against long input strings. 
For real-time metrics and statistics involving large inputs a good approach is -to use a replica (with read-only option disabled) where the bit-wise +to use a replica (with replica-read-only option enabled) where the bit-wise operations are performed to avoid blocking the master instance. diff --git a/commands/bitpos.md b/commands/bitpos.md index 1016941418..689926dbc0 100644 --- a/commands/bitpos.md +++ b/commands/bitpos.md @@ -22,20 +22,6 @@ bit, -2 is the penultimate, and so forth. Non-existent keys are treated as empty strings. -@return - -@integer-reply - -The command returns the position of the first bit set to 1 or 0 according to the request. - -If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. - -If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value `0xff` the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. - -Basically, the function considers the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**. - -However, this behavior changes if you are looking for clear bits and specify a range with both __start__ and __end__. If no clear bit is found in the specified range, the function returns -1 as the user specified a clear range and there are no 0 bits in that range. - @examples ```cli diff --git a/commands/blmove.md b/commands/blmove.md index 463a2dca28..7a45b13e89 100644 --- a/commands/blmove.md +++ b/commands/blmove.md @@ -10,11 +10,6 @@ This command comes in place of the now deprecated `BRPOPLPUSH`. Doing See `LMOVE` for more information. -@return - -@bulk-string-reply: the element being popped from `source` and pushed to `destination`. -If `timeout` is reached, a @nil-reply is returned. 
- ## Pattern: Reliable queue Please see the pattern description in the `LMOVE` documentation. diff --git a/commands/blmpop.md b/commands/blmpop.md index 262713ef31..73bac9d284 100644 --- a/commands/blmpop.md +++ b/commands/blmpop.md @@ -6,10 +6,3 @@ When all lists are empty, Redis will block the connection until another client p A `timeout` of zero can be used to block indefinitely. See `LMPOP` for more information. - -@return - -@array-reply: specifically: - -* A `nil` when no element could be popped, and timeout is reached. -* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of elements. diff --git a/commands/blpop.md b/commands/blpop.md index 1d73fff19d..5ce6ffb447 100644 --- a/commands/blpop.md +++ b/commands/blpop.md @@ -82,15 +82,6 @@ thing that happens when the timeout is reached. If you like science fiction, think of time flowing at infinite speed inside a `MULTI` / `EXEC` block... -@return - -@array-reply: specifically: - -* A `nil` multi-bulk when no element could be popped and the timeout expired. -* A two-element multi-bulk with the first element being the name of the key - where an element was popped and the second element being the value of the - popped element. - @examples ``` diff --git a/commands/brpop.md b/commands/brpop.md index dfa2b91cac..08806cff53 100644 --- a/commands/brpop.md +++ b/commands/brpop.md @@ -10,15 +10,6 @@ the tail of a list instead of popping from the head. [cb]: /commands/blpop -@return - -@array-reply: specifically: - -* A `nil` multi-bulk when no element could be popped and the timeout expired. -* A two-element multi-bulk with the first element being the name of the key - where an element was popped and the second element being the value of the - popped element. 
- @examples ``` diff --git a/commands/brpoplpush.md b/commands/brpoplpush.md index 9a6fe376d9..3989fd67fe 100644 --- a/commands/brpoplpush.md +++ b/commands/brpoplpush.md @@ -7,11 +7,6 @@ A `timeout` of zero can be used to block indefinitely. See `RPOPLPUSH` for more information. -@return - -@bulk-string-reply: the element being popped from `source` and pushed to `destination`. -If `timeout` is reached, a @nil-reply is returned. - ## Pattern: Reliable queue Please see the pattern description in the `RPOPLPUSH` documentation. diff --git a/commands/bzmpop.md b/commands/bzmpop.md index dc0c077d96..50c25709c0 100644 --- a/commands/bzmpop.md +++ b/commands/bzmpop.md @@ -6,11 +6,3 @@ When all sorted sets are empty, Redis will block the connection until another cl A `timeout` of zero can be used to block indefinitely. See `ZMPOP` for more information. - -@return - -@array-reply: specifically: - -* A `nil` when no element could be popped. -* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score. - diff --git a/commands/bzpopmax.md b/commands/bzpopmax.md index 8155ed8b99..c6d726f4db 100644 --- a/commands/bzpopmax.md +++ b/commands/bzpopmax.md @@ -14,15 +14,6 @@ with the highest scores instead of popping the ones with the lowest scores. [cb]: /commands/bzpopmin -@return - -@array-reply: specifically: - -* A `nil` multi-bulk when no element could be popped and the timeout expired. -* A three-element multi-bulk with the first element being the name of the key - where a member was popped, the second element is the popped member itself, - and the third element is the score of the popped element. 
- @examples ``` diff --git a/commands/bzpopmin.md b/commands/bzpopmin.md index b48a4fb759..936154d051 100644 --- a/commands/bzpopmin.md +++ b/commands/bzpopmin.md @@ -14,15 +14,6 @@ popped from. [cl]: /commands/blpop -@return - -@array-reply: specifically: - -* A `nil` multi-bulk when no element could be popped and the timeout expired. -* A three-element multi-bulk with the first element being the name of the key - where a member was popped, the second element is the popped member itself, - and the third element is the score of the popped element. - @examples ``` diff --git a/commands/client-caching.md b/commands/client-caching.md index 1f4b8b8a3e..e3bf90ee86 100644 --- a/commands/client-caching.md +++ b/commands/client-caching.md @@ -16,7 +16,3 @@ tracked using `CLIENT CACHING no`. Basically the command sets a state in the connection, that is valid only for the next command execution, that will modify the behavior of client tracking. - -@return - -@simple-string-reply: `OK` or an error if the argument is not yes or no. diff --git a/commands/client-getname.md b/commands/client-getname.md index f60539dd02..0991c6eea9 100644 --- a/commands/client-getname.md +++ b/commands/client-getname.md @@ -1,5 +1 @@ The `CLIENT GETNAME` returns the name of the current connection as set by `CLIENT SETNAME`. Since every new connection starts without an associated name, if no name was assigned a null bulk reply is returned. - -@return - -@bulk-string-reply: The connection name, or a null bulk reply if no name is set. diff --git a/commands/client-getredir.md b/commands/client-getredir.md index 2cc326957f..dcc623c1a8 100644 --- a/commands/client-getredir.md +++ b/commands/client-getredir.md @@ -5,7 +5,3 @@ order to avoid forcing client libraries implementations to remember the ID notifications are redirected to, this command exists in order to improve introspection and allow clients to check later if redirection is active and towards which client ID. 
- -@return - -@integer-reply: the ID of the client we are redirecting the notifications to. The command returns `-1` if client tracking is not enabled, or `0` if client tracking is enabled but we are not redirecting the notifications to any client. diff --git a/commands/client-help.md b/commands/client-help.md index 964a625740..6745f6cf24 100644 --- a/commands/client-help.md +++ b/commands/client-help.md @@ -1,5 +1 @@ The `CLIENT HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/client-id.md b/commands/client-id.md index fe6723c513..53fcac5016 100644 --- a/commands/client-id.md +++ b/commands/client-id.md @@ -12,9 +12,3 @@ introduced also in Redis 5 together with `CLIENT ID`. Check the `CLIENT UNBLOCK` ```cli CLIENT ID ``` - -@return - -@integer-reply - -The id of the client. diff --git a/commands/client-info.md b/commands/client-info.md index f60592e175..5eea826ba8 100644 --- a/commands/client-info.md +++ b/commands/client-info.md @@ -7,7 +7,3 @@ The reply format is identical to that of `CLIENT LIST`, and the content consists ```cli CLIENT INFO ``` - -@return - -@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for the current client. diff --git a/commands/client-kill.md b/commands/client-kill.md index ea65aaf3ea..e54529a7c8 100644 --- a/commands/client-kill.md +++ b/commands/client-kill.md @@ -17,6 +17,7 @@ instead of killing just by address. The following filters are available: * `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `replica` and `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. * `CLIENT KILL USER username`. 
Closes all the connections that are authenticated with the specified [ACL](/topics/acl) username, however it returns an error if the username does not map to an existing ACL user. * `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command. +* `CLIENT KILL MAXAGE maxage`. Closes all the connections that are older than the specified age, in seconds. It is possible to provide multiple filters at the same time. The command will handle multiple filters via logical AND. For example: @@ -41,13 +42,3 @@ the client point of view, the connection can never be closed in the middle of the execution of a command. However, the client will notice the connection has been closed only when the next command is sent (and results in network error). - -@return - -When called with the three arguments format: - -@simple-string-reply: `OK` if the connection exists and has been closed - -When called with the filter / value format: - -@integer-reply: the number of clients killed. diff --git a/commands/client-list.md b/commands/client-list.md index 6241425ba7..653e75a8ef 100644 --- a/commands/client-list.md +++ b/commands/client-list.md @@ -5,14 +5,6 @@ You can use one of the optional subcommands to filter the list. The `TYPE type` The `ID` filter only returns entries for clients with IDs matching the `client-id` arguments. -@return - -@bulk-string-reply: a unique string, formatted as follows: - -* One client connection per line (separated by LF) -* Each line is composed of a succession of `property=value` fields separated - by a space character. - Here is the meaning of the fields: * `id`: a unique 64-bit client ID @@ -28,6 +20,7 @@ Here is the meaning of the fields: * `psub`: number of pattern matching subscriptions * `ssub`: number of shard channel subscriptions. 
Added in Redis 7.0.3 * `multi`: number of commands in a MULTI/EXEC context +* `watch`: number of keys this client is currently watching. Added in Redis 8.0 * `qbuf`: query buffer length (0 means no query pending) * `qbuf-free`: free space of the query buffer (0 means the buffer is full) * `argv-mem`: incomplete arguments for the next command (already extracted from query buffer) @@ -49,6 +42,7 @@ A: connection to be closed ASAP b: the client is waiting in a blocking operation c: connection to be closed after writing entire reply d: a watched keys has been modified - EXEC will fail +e: the client is excluded from the client eviction mechanism i: the client is waiting for a VM I/O (deprecated) M: the client is a master N: no specific flag set @@ -60,6 +54,7 @@ u: the client is unblocked U: the client is connected via a Unix domain socket x: the client is in a MULTI/EXEC context t: the client enabled keys tracking in order to perform client side caching +T: the client will not touch the LRU/LFU of the keys it accesses R: the client tracking target client is invalid B: the client enabled broadcast tracking mode ``` diff --git a/commands/client-no-evict.md b/commands/client-no-evict.md index 70070a6abb..2a18898e41 100644 --- a/commands/client-no-evict.md +++ b/commands/client-no-evict.md @@ -5,7 +5,3 @@ When turned on and client eviction is configured, the current connection will be When turned off, the current client will be re-included in the pool of potential clients to be evicted (and evicted if needed). See [client eviction](/topics/clients#client-eviction) for more details. - -@return - -@simple-string-reply: `OK`. diff --git a/commands/client-no-touch.md b/commands/client-no-touch.md new file mode 100644 index 0000000000..da723058bc --- /dev/null +++ b/commands/client-no-touch.md @@ -0,0 +1,5 @@ +The `CLIENT NO-TOUCH` command controls whether commands sent by the client will alter the LRU/LFU of the keys they access. 
+ +When turned on, the current client will not change LFU/LRU stats, unless it sends the `TOUCH` command. + +When turned off, the client touches LFU/LRU stats just as a normal client. diff --git a/commands/client-pause.md b/commands/client-pause.md index 6f778da1e7..d7c381f9c0 100644 --- a/commands/client-pause.md +++ b/commands/client-pause.md @@ -35,10 +35,6 @@ Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or expired during the time clients are paused. This way the dataset is guaranteed to be static not just from the point of view of clients not being able to write, but also from the point of view of internal operations. -@return - -@simple-string-reply: The command returns OK or an error if the timeout is invalid. - ## Behavior change history * `>= 3.2.0`: Client pause prevents client pause and key eviction as well. \ No newline at end of file diff --git a/commands/client-reply.md b/commands/client-reply.md index f2c3ed8b42..63608e60a9 100644 --- a/commands/client-reply.md +++ b/commands/client-reply.md @@ -5,9 +5,3 @@ The `CLIENT REPLY` command controls whether the server will reply the client's c * `ON`. This is the default mode in which the server returns a reply to every command. * `OFF`. In this mode the server will not reply to client commands. * `SKIP`. This mode skips the reply of command immediately after it. - -@return - -When called with either `OFF` or `SKIP` subcommands, no reply is made. When called with `ON`: - -@simple-string-reply: `OK`. diff --git a/commands/client-setinfo.md b/commands/client-setinfo.md new file mode 100644 index 0000000000..64a66f3734 --- /dev/null +++ b/commands/client-setinfo.md @@ -0,0 +1,12 @@ +The `CLIENT SETINFO` command assigns various info attributes to the current connection which are displayed in the output of `CLIENT LIST` and `CLIENT INFO`. 
+ +Client libraries are expected to pipeline this command after authentication on all connections +and ignore failures since they could be connected to an older version that doesn't support them. + +Currently the supported attributes are: +* `lib-name` - meant to hold the name of the client library that's in use. +* `lib-ver` - meant to hold the client library's version. + +There is no limit to the length of these attributes. However it is not possible to use spaces, newlines, or other non-printable characters that would violate the format of the `CLIENT LIST` reply. + +Note that these attributes are **not** cleared by the RESET command. diff --git a/commands/client-setname.md b/commands/client-setname.md index c1e70af259..f0a147921b 100644 --- a/commands/client-setname.md +++ b/commands/client-setname.md @@ -13,7 +13,3 @@ The connection name can be inspected using `CLIENT GETNAME`. Every new connection starts without an assigned name. Tip: setting names to connections is a good way to debug connection leaks due to bugs in the application using Redis. - -@return - -@simple-string-reply: `OK` if the connection name was successfully set. diff --git a/commands/client-tracking.md b/commands/client-tracking.md index e77f7d907e..503d4dca62 100644 --- a/commands/client-tracking.md +++ b/commands/client-tracking.md @@ -27,7 +27,3 @@ command when enabling tracking: * `OPTIN`: when broadcasting is NOT active, normally don't track keys in read only commands, unless they are called immediately after a `CLIENT CACHING yes` command. * `OPTOUT`: when broadcasting is NOT active, normally track keys in read only commands, unless they are called immediately after a `CLIENT CACHING no` command. * `NOLOOP`: don't send notifications about keys modified by this connection itself. - -@return - -@simple-string-reply: `OK` if the connection was successfully put in tracking mode or if the tracking mode was successfully disabled. Otherwise an error is returned. 
diff --git a/commands/client-trackinginfo.md b/commands/client-trackinginfo.md index 82de43e255..6cc0df14ab 100644 --- a/commands/client-trackinginfo.md +++ b/commands/client-trackinginfo.md @@ -1,8 +1,6 @@ The command returns information about the current client connection's use of the [server assisted client side caching](/topics/client-side-caching) feature. -@return - -@array-reply: a list of tracking information sections and their respective values, specifically: +Here's the list of tracking information sections and their respective values: * **flags**: A list of tracking flags used by the connection. The flags and their meanings are as follows: * `off`: The connection isn't using server assisted client side caching. diff --git a/commands/client-unblock.md b/commands/client-unblock.md index 11dff98ce7..36e70b2924 100644 --- a/commands/client-unblock.md +++ b/commands/client-unblock.md @@ -49,10 +49,3 @@ NULL > BRPOP key1 key2 key3 key4 0 (client is blocked again) ``` - -@return - -@integer-reply, specifically: - -* `1` if the client was unblocked successfully. -* `0` if the client wasn't unblocked. diff --git a/commands/client-unpause.md b/commands/client-unpause.md index c43848522f..9878ee1b3c 100644 --- a/commands/client-unpause.md +++ b/commands/client-unpause.md @@ -1,5 +1 @@ `CLIENT UNPAUSE` is used to resume command processing for all clients that were paused by `CLIENT PAUSE`. - -@return - -@simple-string-reply: The command returns `OK` diff --git a/commands/cluster-addslots.md b/commands/cluster-addslots.md index 060406661d..101f671506 100644 --- a/commands/cluster-addslots.md +++ b/commands/cluster-addslots.md @@ -29,7 +29,7 @@ are already assigned: This command only works in cluster mode and is useful in the following Redis Cluster operations: -1. To create a new cluster ADDSLOTS is used in order to initially setup master nodes splitting the available hash slots among them. +1. 
To create a new cluster, `CLUSTER ADDSLOTS` is used to initially set up master nodes splitting the available hash slots among them. 2. In order to fix a broken cluster where certain slots are unassigned. ## Information about slots propagation and warnings @@ -45,7 +45,3 @@ This means that this command should be used with care only by applications orchestrating Redis Cluster, like `redis-cli`, and the command if used out of the right context can leave the cluster in a wrong state or cause data loss. - -@return - -@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. diff --git a/commands/cluster-addslotsrange.md b/commands/cluster-addslotsrange.md index a00e23f709..1fc1cd3d8b 100644 --- a/commands/cluster-addslotsrange.md +++ b/commands/cluster-addslotsrange.md @@ -1,15 +1,15 @@ The `CLUSTER ADDSLOTSRANGE` is similar to the `CLUSTER ADDSLOTS` command in that they both assign hash slots to nodes. -The difference between the two commands is that `ADDSLOTS` takes a list of slots to assign to the node, while `ADDSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to assign to the node. +The difference between the two commands is that `CLUSTER ADDSLOTS` takes a list of slots to assign to the node, while `CLUSTER ADDSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to assign to the node. ## Example -To assign slots 1 2 3 4 5 to the node, the `ADDSLOTS` command is: +To assign slots 1 2 3 4 5 to the node, the `CLUSTER ADDSLOTS` command is: > CLUSTER ADDSLOTS 1 2 3 4 5 OK -The same operation can be completed with the following `ADDSLOTSRANGE` command: +The same operation can be completed with the following `CLUSTER ADDSLOTSRANGE` command: > CLUSTER ADDSLOTSRANGE 1 5 OK @@ -19,9 +19,5 @@ The same operation can be completed with the following `ADDSLOTSRANGE` command: This command only works in cluster mode and is useful in the following Redis Cluster operations: -1.
To create a new cluster ADDSLOTSRANGE is used in order to initially setup master nodes splitting the available hash slots among them. +1. To create a new cluster, `CLUSTER ADDSLOTSRANGE` is used to initially set up master nodes splitting the available hash slots among them. 2. In order to fix a broken cluster where certain slots are unassigned. - -@return - -@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. diff --git a/commands/cluster-bumpepoch.md b/commands/cluster-bumpepoch.md index b05694a442..68bce48797 100644 --- a/commands/cluster-bumpepoch.md +++ b/commands/cluster-bumpepoch.md @@ -3,7 +3,3 @@ Advances the cluster config epoch. The `CLUSTER BUMPEPOCH` command triggers an increment to the cluster's config epoch from the connected node. The epoch will be incremented if the node's config epoch is zero, or if it is less than the cluster's greatest epoch. **Note:** config epoch management is performed internally by the cluster, and relies on obtaining a consensus of nodes. The `CLUSTER BUMPEPOCH` attempts to increment the config epoch **WITHOUT** getting the consensus, so using it may violate the "last failover wins" rule. Use it with caution. - -@return - -@simple-string-reply: `BUMPED` if the epoch was incremented, or `STILL` if the node already has the greatest config epoch in the cluster. diff --git a/commands/cluster-count-failure-reports.md b/commands/cluster-count-failure-reports.md index ac1ef71c0e..399a6bc000 100644 --- a/commands/cluster-count-failure-reports.md +++ b/commands/cluster-count-failure-reports.md @@ -16,7 +16,3 @@ This command returns the number of failure reports for the current node which ar This command is mainly useful for debugging, when the failure detector of Redis Cluster is not operating as we believe it should. - -@return - -@integer-reply: the number of active failure reports for the node. 
diff --git a/commands/cluster-countkeysinslot.md b/commands/cluster-countkeysinslot.md index 0bffec84b5..2a6e6af4f8 100644 --- a/commands/cluster-countkeysinslot.md +++ b/commands/cluster-countkeysinslot.md @@ -7,7 +7,3 @@ zero being returned. > CLUSTER COUNTKEYSINSLOT 7000 (integer) 50341 ``` - -@return - -@integer-reply: The number of keys in the specified hash slot, or an error if the hash slot is invalid. diff --git a/commands/cluster-delslots.md b/commands/cluster-delslots.md index 77204e1e95..4fdb4a5180 100644 --- a/commands/cluster-delslots.md +++ b/commands/cluster-delslots.md @@ -41,8 +41,3 @@ This command only works in cluster mode and may be useful for debugging and in order to manually orchestrate a cluster configuration when a new cluster is created. It is currently not used by `redis-cli`, and mainly exists for API completeness. - -@return - -@simple-string-reply: `OK` if the command was successful. Otherwise -an error is returned. diff --git a/commands/cluster-delslotsrange.md b/commands/cluster-delslotsrange.md index e4c1f2b89d..af902ff586 100644 --- a/commands/cluster-delslotsrange.md +++ b/commands/cluster-delslotsrange.md @@ -25,8 +25,3 @@ This command only works in cluster mode and may be useful for debugging and in order to manually orchestrate a cluster configuration when a new cluster is created. It is currently not used by `redis-cli`, and mainly exists for API completeness. - -@return - -@simple-string-reply: `OK` if the command was successful. Otherwise -an error is returned. diff --git a/commands/cluster-failover.md b/commands/cluster-failover.md index 911eaea894..85834cba67 100644 --- a/commands/cluster-failover.md +++ b/commands/cluster-failover.md @@ -61,7 +61,3 @@ Because of this the **TAKEOVER** option should be used with care. 
To check that the masters are aware of a new replica, you can send `CLUSTER NODES` or `CLUSTER REPLICAS` to each of the master nodes and check that it appears as a replica, before sending `CLUSTER FAILOVER` to the replica. * To check that the failover has actually happened you can use `ROLE`, `INFO REPLICATION` (which indicates "role:master" after successful failover), or `CLUSTER NODES` to verify that the state of the cluster has changed sometime after the command was sent. * To check if the failover has failed, check the replica's log for "Manual failover timed out", which is logged if the replica has given up after a few seconds. - -@return - -@simple-string-reply: `OK` if the command was accepted and a manual failover is going to be attempted. An error if the operation cannot be executed, for example if we are talking with a node which is already a master. diff --git a/commands/cluster-flushslots.md b/commands/cluster-flushslots.md index b0b3fdfba6..0c8984ff23 100644 --- a/commands/cluster-flushslots.md +++ b/commands/cluster-flushslots.md @@ -1,7 +1,3 @@ Deletes all slots from a node. The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected node. It can only be called when the database is empty. - -@return - -@simple-string-reply: `OK` diff --git a/commands/cluster-forget.md b/commands/cluster-forget.md index 6bff5061fe..afc06414df 100644 --- a/commands/cluster-forget.md +++ b/commands/cluster-forget.md @@ -51,7 +51,3 @@ The command does not succeed and returns an error in the following cases: 1. The specified node ID is not found in the nodes table. 2. The node receiving the command is a replica, and the specified node ID identifies its current master. 3. The node ID identifies the same node we are sending the command to. - -@return - -@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. 
diff --git a/commands/cluster-getkeysinslot.md b/commands/cluster-getkeysinslot.md index 120bf4412d..dec113df05 100644 --- a/commands/cluster-getkeysinslot.md +++ b/commands/cluster-getkeysinslot.md @@ -14,7 +14,3 @@ of the `CLUSTER SETSLOT` command documentation. 2) "key_89793" 3) "key_92937" ``` - -@return - -@array-reply: From 0 to *count* key names in a Redis array reply. diff --git a/commands/cluster-help.md b/commands/cluster-help.md index 3b1e159006..85579bd446 100644 --- a/commands/cluster-help.md +++ b/commands/cluster-help.md @@ -1,5 +1 @@ The `CLUSTER HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/cluster-info.md b/commands/cluster-info.md index 372cc73906..3f49280aca 100644 --- a/commands/cluster-info.md +++ b/commands/cluster-info.md @@ -46,7 +46,3 @@ Here are the explanation of these fields: * `cluster_stats_messages_publishshard_sent` and `cluster_stats_messages_publishshard_received`: Pub/Sub Publish shard propagation, see [Sharded Pubsub](/topics/pubsub#sharded-pubsub). More information about the Current Epoch and Config Epoch variables are available in the [Redis Cluster specification document](/topics/cluster-spec#cluster-current-epoch). - -@return - -@bulk-string-reply: A map between named fields and values in the form of `:` lines separated by newlines composed by the two bytes `CRLF`. 
diff --git a/commands/cluster-keyslot.md b/commands/cluster-keyslot.md index 7e0358785c..1556874255 100644 --- a/commands/cluster-keyslot.md +++ b/commands/cluster-keyslot.md @@ -18,7 +18,3 @@ Example use cases for this command: ``` Note that the command implements the full hashing algorithm, including support for **hash tags**, that is the special property of Redis Cluster key hashing algorithm, of hashing just what is between `{` and `}` if such a pattern is found inside the key name, in order to force multiple keys to be handled by the same node. - -@return - -@integer-reply: The hash slot number. diff --git a/commands/cluster-links.md b/commands/cluster-links.md index 7b3376298c..12e7630e68 100644 --- a/commands/cluster-links.md +++ b/commands/cluster-links.md @@ -42,7 +42,3 @@ Each map is composed of the following attributes of the corresponding cluster li 4. `events`: Events currently registered for the link. `r` means readable event, `w` means writable event. 5. `send-buffer-allocated`: Allocated size of the link's send buffer, which is used to buffer outgoing messages toward the peer. 6. `send-buffer-used`: Size of the portion of the link's send buffer that is currently holding data(messages). - -@return - -@array-reply: An array of maps where each map contains various attributes and their values of a cluster link. diff --git a/commands/cluster-meet.md b/commands/cluster-meet.md index b33c9fb749..0ce7c212f6 100644 --- a/commands/cluster-meet.md +++ b/commands/cluster-meet.md @@ -36,7 +36,3 @@ the node to force the receiver to accept it as a trusted node, it sends a `MEET` packet instead of a `PING` packet. The two packets have exactly the same format, but the former forces the receiver to acknowledge the node as trusted. - -@return - -@simple-string-reply: `OK` if the command was successful. If the address or port specified are invalid an error is returned. 
diff --git a/commands/cluster-myid.md b/commands/cluster-myid.md index 02e8b1d3b6..594c28b3f4 100644 --- a/commands/cluster-myid.md +++ b/commands/cluster-myid.md @@ -1,7 +1,3 @@ Returns the node's id. The `CLUSTER MYID` command returns the unique, auto-generated identifier that is associated with the connected cluster node. - -@return - -@bulk-string-reply: The node id. \ No newline at end of file diff --git a/commands/cluster-myshardid.md b/commands/cluster-myshardid.md new file mode 100644 index 0000000000..6ffb3c5b9b --- /dev/null +++ b/commands/cluster-myshardid.md @@ -0,0 +1,3 @@ +Returns the node's shard id. + +The `CLUSTER MYSHARDID` command returns the unique, auto-generated identifier that is associated with the shard to which the connected cluster node belongs. diff --git a/commands/cluster-nodes.md b/commands/cluster-nodes.md index 2ec706c580..a802fe7cff 100644 --- a/commands/cluster-nodes.md +++ b/commands/cluster-nodes.md @@ -17,40 +17,42 @@ It is also used by `redis-cli` in order to manage a cluster. ## Serialization format The output of the command is just a space-separated CSV string, where -each line represents a node in the cluster. The following is an example -of output: +each line represents a node in the cluster. The following +is an example of output on Redis 7.2.0. 
``` -07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004@31004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected -67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002 master - 0 1426238316232 2 connected 5461-10922 -292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003 master - 0 1426238318243 3 connected 10923-16383 -6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005@31005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected -824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006@31006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected -e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001 myself,master - 0 0 1 connected 0-5460 +07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004@31004,hostname4 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected +67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002,hostname2 master - 0 1426238316232 2 connected 5461-10922 +292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003,hostname3 master - 0 1426238318243 3 connected 10923-16383 +6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005@31005,hostname5 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected +824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006@31006,hostname6 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected +e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself,master - 0 0 1 connected 0-5460 ``` Each line is composed of the following fields: ``` - ... + ... ``` -The meaning of each filed is the following: +The meaning of each field is the following: -1. `id`: The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). -2. `ip:port@cport`: The node address where clients should contact the node to run queries. -3. 
`flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are explained in detail in the next section. -4. `master`: If the node is a replica, and the master is known, the master node ID, otherwise the "-" character. -5. `ping-sent`: Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. -6. `pong-recv`: Milliseconds unix time the last pong was received. -7. `config-epoch`: The configuration epoch (or version) of the current node (or of the current master if the node is a replica). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoch wins. -8. `link-state`: The state of the link used for the node-to-node cluster bus. We use this link to communicate with the node. Can be `connected` or `disconnected`. -9. `slot`: A hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. +1. `id`: The node ID, a 40-character globally unique string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). +2. `ip:port@cport`: The node address that clients should contact to run queries, along with the used cluster bus port. + `:0@0` can be expected when the address is no longer known for this node ID, hence flagged with `noaddr`. +3. `hostname`: A human readable string that can be configured via the `cluster-announce-hostname` setting. The max length of the string is 256 characters, excluding the null terminator.
The name can contain ASCII alphanumeric characters, '-', and '.' only. +5. `flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are explained below. +6. `master`: If the node is a replica, and the primary is known, the primary node ID, otherwise the "-" character. +7. `ping-sent`: Unix time at which the currently active ping was sent, or zero if there are no pending pings, in milliseconds. +8. `pong-recv`: Unix time the last pong was received, in milliseconds. +9. `config-epoch`: The configuration epoch (or version) of the current node (or of the current primary if the node is a replica). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with the higher configuration epoch wins. +10. `link-state`: The state of the link used for the node-to-node cluster bus. Use this link to communicate with the node. Can be `connected` or `disconnected`. +11. `slot`: A hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, it is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. -Meaning of the flags (field number 3): +Flags are: * `myself`: The node you are contacting. -* `master`: Node is a master. +* `master`: Node is a primary. * `slave`: Node is a replica. * `fail?`: Node is in `PFAIL` state. Not reachable for the node you are contacting, but still logically reachable (not in `FAIL` state). * `fail`: Node is in `FAIL` state. It was not reachable for multiple nodes that promoted the `PFAIL` state to `FAIL`. 
@@ -61,13 +63,13 @@ Meaning of the flags (field number 3): ## Notes on published config epochs -Replicas broadcast their master's config epochs (in order to get an `UPDATE` +Replicas broadcast their primary's config epochs (in order to get an `UPDATE` message if they are found to be stale), so the real config epoch of the replica (which is meaningless more or less, since they don't serve hash slots) can be only obtained checking the node flagged as `myself`, which is the entry of the node we are asking to generate `CLUSTER NODES` output. The other replicas epochs reflect what they publish in heartbeat packets, which is, the -configuration epoch of the masters they are currently replicating. +configuration epoch of the primaries they are currently replicating. ## Special slot entries @@ -103,8 +105,4 @@ Note that: 1. Migration and importing slots are only added to the node flagged as `myself`. This information is local to a node, for its own slots. 2. Importing and migrating slots are provided as **additional info**. If the node has a given hash slot assigned, it will be also a plain number in the list of hash slots, so clients that don't have a clue about hash slots migrations can just skip this special fields. -@return - -@bulk-string-reply: The serialized cluster configuration. - **A note about the word slave used in this man page and command name**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. diff --git a/commands/cluster-replicas.md b/commands/cluster-replicas.md index 4e6192e117..6d0e63708e 100644 --- a/commands/cluster-replicas.md +++ b/commands/cluster-replicas.md @@ -9,7 +9,3 @@ and we ask `CLUSTER REPLICAS` to a node that has not yet received the configuration update, it may show stale information. 
However eventually (in a matter of seconds if there are no network partitions) all the nodes will agree about the set of nodes associated with a given master. - -@return - -The command returns data in the same format as `CLUSTER NODES`. diff --git a/commands/cluster-replicate.md b/commands/cluster-replicate.md index 5b403aaa8b..9d3c36d280 100644 --- a/commands/cluster-replicate.md +++ b/commands/cluster-replicate.md @@ -20,7 +20,3 @@ only if the following additional conditions are met: 2. The node is empty, no keys are stored at all in the key space. If the command succeeds the new replica will immediately try to contact its master in order to replicate from it. - -@return - -@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. diff --git a/commands/cluster-reset.md b/commands/cluster-reset.md index 02ffe9eb95..1d76229342 100644 --- a/commands/cluster-reset.md +++ b/commands/cluster-reset.md @@ -19,7 +19,3 @@ is also extensively used by the Redis Cluster testing framework in order to reset the state of the cluster every time a new test unit is executed. If no reset type is specified, the default is **soft**. - -@return - -@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. diff --git a/commands/cluster-saveconfig.md b/commands/cluster-saveconfig.md index 31308c2028..3f23701963 100644 --- a/commands/cluster-saveconfig.md +++ b/commands/cluster-saveconfig.md @@ -9,7 +9,3 @@ configuration via the `CLUSTER` command in order to ensure the new configuration is persisted on disk, however all the commands should normally be able to auto schedule to persist the configuration on disk when it is important to do so for the correctness of the system in the event of a restart. - -@return - -@simple-string-reply: `OK` or an error if the operation fails. 
diff --git a/commands/cluster-set-config-epoch.md b/commands/cluster-set-config-epoch.md index 71f458f33e..5eb7fca262 100644 --- a/commands/cluster-set-config-epoch.md +++ b/commands/cluster-set-config-epoch.md @@ -19,7 +19,3 @@ configuration epoch. So, using `CLUSTER SET-CONFIG-EPOCH`, when a new cluster is created, we can assign a different progressive configuration epoch to each node before joining the cluster together. - -@return - -@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. diff --git a/commands/cluster-setslot.md b/commands/cluster-setslot.md index e712d36ba6..ab32c6772c 100644 --- a/commands/cluster-setslot.md +++ b/commands/cluster-setslot.md @@ -60,10 +60,6 @@ command: It is important to note that step 3 is the only time when a Redis Cluster node will create a new config epoch without agreement from other nodes. This only happens when a manual configuration is operated. However it is impossible that this creates a non-transient setup where two nodes have the same config epoch, since Redis Cluster uses a config epoch collision resolution algorithm. -@return - -@simple-string-reply: All the subcommands return `OK` if the command was successful. Otherwise an error is returned. - ## Redis Cluster live resharding explained The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node where we want to migrate the `destination` node. 
diff --git a/commands/cluster-shards.md b/commands/cluster-shards.md index bca6d1c13f..a6989a3910 100644 --- a/commands/cluster-shards.md +++ b/commands/cluster-shards.md @@ -14,7 +14,7 @@ The command returns an array of shards, with each shard containing two fields, ' The 'slots' field is a list of slot ranges served by this shard, stored as pair of integers representing the inclusive start and end slots of the ranges. For example, if a node owns the slots 1, 2, 3, 5, 7, 8 and 9, the slots ranges would be stored as [1-3], [5-5], [7-9]. -The slots field would therefor be represented by the following list of integers. +The slots field would therefore be represented by the following list of integers. ``` 1) 1) "slots" @@ -45,109 +45,109 @@ The endpoint, along with the port, defines the location that clients should use A NULL value for the endpoint indicates the node has an unknown endpoint and the client should connect to the same endpoint it used to send the `CLUSTER SHARDS` command but with the port returned from the command. This unknown endpoint configuration is useful when the Redis nodes are behind a load balancer that Redis doesn't know the endpoint of. Which endpoint is set is determined by the `cluster-preferred-endpoint-type` config. - -@return - -@array-reply: nested list of a map of hash ranges and shard nodes. +An empty string `""` is another abnormal value of the endpoint field, as well as for the ip field, which is returned if the node doesn't know its own IP address. +This can happen in a cluster that consists of only one node or the node has not yet been joined with the rest of the cluster. +The value `?` is displayed if the node is incorrectly configured to use announced hostnames but no hostname is configured using `cluster-announce-hostname`. 
+Clients may treat the empty string in the same way as NULL, that is the same endpoint it used to send the current command to, while `"?"` should be treated as an unknown node, not necessarily the same node as the one serving the current command. @examples ``` > CLUSTER SHARDS 1) 1) "slots" - 2) 1) (integer) 10923 - 2) (integer) 11110 - 3) (integer) 11113 - 4) (integer) 16111 - 5) (integer) 16113 - 6) (integer) 16383 + 2) 1) (integer) 0 + 2) (integer) 5460 3) "nodes" 4) 1) 1) "id" - 2) "71f058078c142a73b94767a4e78e9033d195dfb4" + 2) "e10b7051d6bf2d5febd39a2be297bbaea6084111" 3) "port" - 4) (integer) 6381 + 4) (integer) 30001 5) "ip" 6) "127.0.0.1" - 7) "role" - 8) "primary" - 9) "replication-offset" - 10) (integer) 1500 - 11) "health" - 12) "online" + 7) "endpoint" + 8) "127.0.0.1" + 9) "role" + 10) "master" + 11) "replication-offset" + 12) (integer) 72156 + 13) "health" + 14) "online" 2) 1) "id" - 2) "1461967c62eab0e821ed54f2c98e594fccfd8736" + 2) "1901f5962d865341e81c85f9f596b1e7160c35ce" 3) "port" - 4) (integer) 7381 + 4) (integer) 30006 5) "ip" 6) "127.0.0.1" - 7) "role" - 8) "replica" - 9) "replication-offset" - 10) (integer) 700 - 11) "health" - 12) "fail" + 7) "endpoint" + 8) "127.0.0.1" + 9) "role" + 10) "replica" + 11) "replication-offset" + 12) (integer) 72156 + 13) "health" + 14) "online" 2) 1) "slots" - 2) 1) (integer) 5461 - 2) (integer) 10922 + 2) 1) (integer) 10923 + 2) (integer) 16383 3) "nodes" 4) 1) 1) "id" - 2) "9215e30cd4a71070088778080565de6ef75fd459" + 2) "fd20502fe1b32fc32c15b69b0a9537551f162f1f" 3) "port" - 4) (integer) 6380 + 4) (integer) 30003 5) "ip" 6) "127.0.0.1" - 7) "role" - 8) "primary" - 9) "replication-offset" - 10) (integer) 1200 - 11) "health" - 12) "online" + 7) "endpoint" + 8) "127.0.0.1" + 9) "role" + 10) "master" + 11) "replication-offset" + 12) (integer) 72156 + 13) "health" + 14) "online" 2) 1) "id" - 2) "877fa59da72cb902d0563d3d8def3437fc3a0196" + 2) "6daa25c08025a0c7e4cc0d1ab255949ce6cee902" 3) "port" - 4) (integer) 7380 
+ 4) (integer) 30005 5) "ip" 6) "127.0.0.1" - 7) "role" - 8) "replica" - 9) "replication-offset" - 10) (integer) 1100 - 11) "health" - 12) "loading" + 7) "endpoint" + 8) "127.0.0.1" + 9) "role" + 10) "replica" + 11) "replication-offset" + 12) (integer) 72156 + 13) "health" + 14) "online" 3) 1) "slots" - 2) 1) (integer) 0 - 2) (integer) 5460 - 3) (integer) 11111 - 4) (integer) 11112 - 3) (integer) 16112 - 4) (integer) 16112 + 2) 1) (integer) 5461 + 2) (integer) 10922 3) "nodes" 4) 1) 1) "id" - 2) "b7e9acc0def782aabe6b596f67f06c73c2ffff93" + 2) "a4a3f445ead085eb3eb9ee7d8c644ec4481ec9be" 3) "port" - 4) (integer) 7379 + 4) (integer) 30002 5) "ip" 6) "127.0.0.1" - 7) "hostname" - 8) "example.com" + 7) "endpoint" + 8) "127.0.0.1" 9) "role" - 10) "replica" + 10) "master" 11) "replication-offset" - 12) "primary" + 12) (integer) 72156 13) "health" 14) "online" 2) 1) "id" - 2) "e2acf1a97c055fd09dcc2c0dcc62b19a6905dbc8" + 2) "da6d5847aa019e9b9d2a8aa24a75f856fd3456cc" 3) "port" - 4) (integer) 6379 + 4) (integer) 30004 5) "ip" 6) "127.0.0.1" - 7) "hostname" - 8) "example.com" + 7) "endpoint" + 8) "127.0.0.1" 9) "role" 10) "replica" 11) "replication-offset" - 12) (integer) 0 + 12) (integer) 72156 13) "health" - 14) "loading" -``` \ No newline at end of file + 14) "online" +``` diff --git a/commands/cluster-slaves.md b/commands/cluster-slaves.md index d90eaf38c5..2f4f9628af 100644 --- a/commands/cluster-slaves.md +++ b/commands/cluster-slaves.md @@ -11,7 +11,3 @@ and we ask `CLUSTER SLAVES` to a node that has not yet received the configuration update, it may show stale information. However eventually (in a matter of seconds if there are no network partitions) all the nodes will agree about the set of nodes associated with a given master. - -@return - -The command returns data in the same format as `CLUSTER NODES`. 
diff --git a/commands/cluster-slots.md b/commands/cluster-slots.md index 68901fe81c..be07af62e8 100644 --- a/commands/cluster-slots.md +++ b/commands/cluster-slots.md @@ -12,6 +12,10 @@ The preferred endpoint, along with the port, defines the location that clients s A NULL value for the endpoint indicates the node has an unknown endpoint and the client should connect to the same endpoint it used to send the `CLUSTER SLOTS` command but with the port returned from the command. This unknown endpoint configuration is useful when the Redis nodes are behind a load balancer that Redis doesn't know the endpoint of. Which endpoint is set as preferred is determined by the `cluster-preferred-endpoint-type` config. +An empty string `""` is another abnormal value of the endpoint field, as well as for the ip field, which is returned if the node doesn't know its own IP address. +This can happen in a cluster that consists of only one node or the node has not yet been joined with the rest of the cluster. +The value `?` is displayed if the node is incorrectly configured to use announced hostnames but no hostname is configured using `cluster-announce-hostname`. +Clients may treat the empty string in the same way as NULL, that is the same endpoint it used to send the current command to, while `"?"` should be treated as an unknown node, not necessarily the same node as the one serving the current command. Additional networking metadata is provided as a map on the fourth argument for each node. The following networking metadata may be returned: @@ -37,12 +41,6 @@ All networking information after the third nested reply are replicas of the mast If a cluster instance has non-contiguous slots (e.g. 1-400,900,1800-6000) then master and replica networking information results will be duplicated for each top-level slot range reply. -@return - -@array-reply: nested list of slot ranges with networking information. 
- -@examples - ``` > CLUSTER SLOTS 1) 1) (integer) 0 @@ -89,4 +87,4 @@ Similarly a client library should try if possible to cope with the fact that old ## Behavior change history -* `>= 7.0.0`: Added support for hostnames and unknown endpoints in first field of node response. \ No newline at end of file +* `>= 7.0.0`: Added support for hostnames and unknown endpoints in first field of node response. diff --git a/commands/command-count.md b/commands/command-count.md index a198dd35ac..2eee36f22e 100644 --- a/commands/command-count.md +++ b/commands/command-count.md @@ -1,9 +1,5 @@ Returns @integer-reply of number of total commands in this Redis server. -@return - -@integer-reply: number of commands returned by `COMMAND` - @examples ```cli diff --git a/commands/command-docs.md b/commands/command-docs.md index 35ea017f2a..98943d73a7 100644 --- a/commands/command-docs.md +++ b/commands/command-docs.md @@ -35,7 +35,7 @@ The following keys may be included in the mapped reply: - _syscmd:_ a system command that isn't meant to be called by users. * **deprecated_since:** the Redis version that deprecated the command (or for module commands, the module version).. * **replaced_by:** the alternative for a deprecated command. -* **history:** an array of historical notes describing changes to the command's behavior or arguments. +* **history:** an array of historical notes describing changes to the command's output or arguments. It should not contain information about behavioral changes. Each entry is an array itself, made up of two elements: 1. The Redis version that the entry applies to. 2. The description of the change. @@ -44,10 +44,6 @@ The following keys may be included in the mapped reply: [td]: /topics/command-arguments -@return - -@array-reply: a map as a flattened array as described above. 
- @examples ```cli diff --git a/commands/command-getkeys.md b/commands/command-getkeys.md index 6b8f300253..6e9b756adf 100644 --- a/commands/command-getkeys.md +++ b/commands/command-getkeys.md @@ -7,11 +7,6 @@ from a full Redis command. but in some cases it's not possible to find keys of certain commands and then the entire command must be parsed to discover some / all key names. You can use `COMMAND GETKEYS` or `COMMAND GETKEYSANDFLAGS` to discover key names directly from how Redis parses the commands. - -@return - -@array-reply: list of keys from your command. - @examples ```cli diff --git a/commands/command-getkeysandflags.md b/commands/command-getkeysandflags.md index 3fa479d02d..0f83afa7db 100644 --- a/commands/command-getkeysandflags.md +++ b/commands/command-getkeysandflags.md @@ -8,11 +8,6 @@ You can use `COMMAND GETKEYS` or `COMMAND GETKEYSANDFLAGS` to discover key names Refer to [key specifications](/topics/key-specs#logical-operation-flags) for information about the meaning of the key flags. -@return - -@array-reply: list of keys from your command. -Each element of the array is an array containing key name in the first entry, and flags in the second. - @examples ```cli diff --git a/commands/command-help.md b/commands/command-help.md index 73d4cc4812..80aa033dea 100644 --- a/commands/command-help.md +++ b/commands/command-help.md @@ -1,5 +1 @@ The `COMMAND HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/command-info.md b/commands/command-info.md index e16a5550c7..568f70a47b 100644 --- a/commands/command-info.md +++ b/commands/command-info.md @@ -6,11 +6,6 @@ get returned. If you request details about non-existing commands, their return position will be nil. - -@return - -@array-reply: nested list of command details. 
- @examples ```cli diff --git a/commands/command-list.md b/commands/command-list.md index 5c0a4a772c..60b6981b43 100644 --- a/commands/command-list.md +++ b/commands/command-list.md @@ -3,9 +3,5 @@ Return an array of the server's command names. You can use the optional _FILTERBY_ modifier to apply one of the following filters: - **MODULE module-name**: get the commands that belong to the module specified by _module-name_. - - **ACLCAT category**: get the commands in the [ACL category](/docs/manual/security/acl/#command-categories) specified by _category_. + - **ACLCAT category**: get the commands in the [ACL category](/docs/management/security/acl/#command-categories) specified by _category_. - **PATTERN pattern**: get the commands that match the given glob-like _pattern_. - -@return - -@array-reply: a list of command names. diff --git a/commands/command.md b/commands/command.md index 37545f9bd9..9a66f6cd07 100644 --- a/commands/command.md +++ b/commands/command.md @@ -196,12 +196,6 @@ Each element in the array represents one subcommand and follows the same specifi [td]: /topics/key-specs [tr]: /topics/key-specs -@return - -@array-reply: a nested list of command details. - -The order of commands in the array is random. - @examples The following is `COMMAND`'s output for the `GET` command: diff --git a/commands/config-get.md b/commands/config-get.md index d2e85a38c7..312abd4137 100644 --- a/commands/config-get.md +++ b/commands/config-get.md @@ -38,8 +38,3 @@ configuration parameter used in the [redis.conf][hgcarr22rc] file: Note that you should look at the redis.conf file relevant to the version you're working with as configuration options might change between versions. The link above is to the latest development version. - - -@return - -The return type of the command is a @array-reply. 
diff --git a/commands/config-help.md b/commands/config-help.md index 5f8bc48248..b45bebd154 100644 --- a/commands/config-help.md +++ b/commands/config-help.md @@ -1,5 +1 @@ The `CONFIG HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/config-resetstat.md b/commands/config-resetstat.md index cb0232b4e7..0c8789e7d0 100644 --- a/commands/config-resetstat.md +++ b/commands/config-resetstat.md @@ -1,16 +1,10 @@ -Resets the statistics reported by Redis using the `INFO` command. +Resets the statistics reported by Redis using the `INFO` and `LATENCY HISTOGRAM` commands. -These are the counters that are reset: +The following is a non-exhaustive list of values that are reset: -* Keyspace hits -* Keyspace misses -* Number of commands processed -* Number of connections received +* Keyspace hits and misses * Number of expired keys -* Number of rejected connections -* Latest fork(2) time -* The `aof_delayed_fsync` counter - -@return - -@simple-string-reply: always `OK`. +* Command and error statistics +* Connections received, rejected and evicted +* Persistence statistics +* Active defragmentation statistics diff --git a/commands/config-rewrite.md b/commands/config-rewrite.md index c1031561bf..f4714975fd 100644 --- a/commands/config-rewrite.md +++ b/commands/config-rewrite.md @@ -13,8 +13,3 @@ CONFIG REWRITE is also able to rewrite the configuration file from scratch if th ## Atomic rewrite process In order to make sure the redis.conf file is always consistent, that is, on errors or crashes you always end with the old file, or the new one, the rewrite is performed with a single `write(2)` call that has enough content to be at least as big as the old file. Sometimes additional padding in the form of comments is added in order to make sure the resulting file is big enough, and later the file gets truncated to remove the padding at the end. 
- -@return - -@simple-string-reply: `OK` when the configuration was rewritten properly. -Otherwise an error is returned. diff --git a/commands/config-set.md b/commands/config-set.md index 4b0841e049..02576fadc4 100644 --- a/commands/config-set.md +++ b/commands/config-set.md @@ -34,8 +34,3 @@ Redis server that started with AOF turned on since the start. You can have both the AOF enabled with RDB snapshotting if you want, the two options are not mutually exclusive. - -@return - -@simple-string-reply: `OK` when the configuration was set properly. -Otherwise an error is returned. diff --git a/commands/copy.md b/commands/copy.md index 2803d2a00c..247c6c6478 100644 --- a/commands/copy.md +++ b/commands/copy.md @@ -5,20 +5,13 @@ By default, the `destination` key is created in the logical database used by the connection. The `DB` option allows specifying an alternative logical database index for the destination key. -The command returns an error when the `destination` key already exists. The +The command returns zero when the `destination` key already exists. The `REPLACE` option removes the `destination` key before copying the value to it. -@return - -@integer-reply, specifically: - -* `1` if `source` was copied. -* `0` if `source` was not copied. - @examples ``` SET dolly "sheep" COPY dolly clone GET clone -``` \ No newline at end of file +``` diff --git a/commands/dbsize.md b/commands/dbsize.md index fe82aa78cb..7aa2fb857e 100644 --- a/commands/dbsize.md +++ b/commands/dbsize.md @@ -1,5 +1 @@ Return the number of keys in the currently-selected database. - -@return - -@integer-reply diff --git a/commands/decr.md b/commands/decr.md index cda121a932..ca6150a40e 100644 --- a/commands/decr.md +++ b/commands/decr.md @@ -6,10 +6,6 @@ This operation is limited to **64 bit signed integers**. See `INCR` for extra information on increment/decrement operations. 
-@return - -@integer-reply: the value of `key` after the decrement - @examples ```cli diff --git a/commands/decrby.md b/commands/decrby.md index b2e823b7e6..b0b4ebadb3 100644 --- a/commands/decrby.md +++ b/commands/decrby.md @@ -6,10 +6,6 @@ This operation is limited to 64 bit signed integers. See `INCR` for extra information on increment/decrement operations. -@return - -@integer-reply: the value of `key` after the decrement - @examples ```cli diff --git a/commands/del.md b/commands/del.md index d5fcbaced5..b20b1e863f 100644 --- a/commands/del.md +++ b/commands/del.md @@ -1,10 +1,6 @@ Removes the specified keys. A key is ignored if it does not exist. -@return - -@integer-reply: The number of keys that were removed. - @examples ```cli diff --git a/commands/discard.md b/commands/discard.md index d84b50331c..a4064ddd74 100644 --- a/commands/discard.md +++ b/commands/discard.md @@ -4,7 +4,3 @@ connection state to normal. [tt]: /topics/transactions If `WATCH` was used, `DISCARD` unwatches all keys watched by the connection. - -@return - -@simple-string-reply: always `OK`. diff --git a/commands/dump.md b/commands/dump.md index d74003314e..e06b501911 100644 --- a/commands/dump.md +++ b/commands/dump.md @@ -21,13 +21,11 @@ should be used. If `key` does not exist a nil bulk reply is returned. -@return - -@bulk-string-reply: the serialized value. - @examples -```cli -SET mykey 10 -DUMP mykey +``` +> SET mykey 10 +OK +> DUMP mykey +"\x00\xc0\n\n\x00n\x9fWE\x0e\xaec\xbb" ``` diff --git a/commands/echo.md b/commands/echo.md index 642d0f3a1d..e158e8910d 100644 --- a/commands/echo.md +++ b/commands/echo.md @@ -1,9 +1,5 @@ Returns `message`. 
-@return - -@bulk-string-reply - @examples ```cli diff --git a/commands/eval.md b/commands/eval.md index 079edeb001..4dd5c8f452 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -12,6 +12,12 @@ to ensure the correct execution of scripts, both in standalone and clustered dep The script **should only** access keys whose names are given as input arguments. Scripts **should never** access keys with programmatically-generated names or based on the contents of data structures stored in the database. +**Note:** +in some cases, users will abuse Lua EVAL by embedding values in the script instead of providing them as arguments, and thus generating a different script on each call to EVAL. +These are added to the Lua interpreter and cached by redis-server, consuming a large amount of memory over time. +Starting from Redis 8.0, scripts loaded with `EVAL` or `EVAL_RO` will be evicted from Redis once their number exceeds a certain threshold, in least-recently-used (LRU) order. +The number of evicted scripts can be viewed through `INFO`'s `evicted_scripts`. + Please refer to the [Redis Programmability](/topics/programmability) and [Introduction to Eval Scripts](/topics/eval-intro) for more information about Lua scripts. @examples diff --git a/commands/eval_ro.md b/commands/eval_ro.md index bbbdb8deb4..bc166c84dd 100644 --- a/commands/eval_ro.md +++ b/commands/eval_ro.md @@ -1,6 +1,6 @@ This is a read-only variant of the `EVAL` command that cannot execute commands that modify data. -For more information about when to use this command vs `EVAL`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts). +For more information about when to use this command vs `EVAL`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only-scripts). For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro).
diff --git a/commands/evalsha_ro.md b/commands/evalsha_ro.md index ccb45d6722..b6164b3303 100644 --- a/commands/evalsha_ro.md +++ b/commands/evalsha_ro.md @@ -1,5 +1,5 @@ This is a read-only variant of the `EVALSHA` command that cannot execute commands that modify data. -For more information about when to use this command vs `EVALSHA`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts). +For more information about when to use this command vs `EVALSHA`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only-scripts). For more information about `EVALSHA` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/commands/exec.md b/commands/exec.md index b2f58fef8b..2dada72b72 100644 --- a/commands/exec.md +++ b/commands/exec.md @@ -7,10 +7,3 @@ When using `WATCH`, `EXEC` will execute commands only if the watched keys were not modified, allowing for a [check-and-set mechanism][ttc]. [ttc]: /topics/transactions#cas - -@return - -@array-reply: each element being the reply to each of the commands in the -atomic transaction. - -When using `WATCH`, `EXEC` can return a @nil-reply if the execution was aborted. diff --git a/commands/exists.md b/commands/exists.md index a9a89afbad..c8a20239d7 100644 --- a/commands/exists.md +++ b/commands/exists.md @@ -2,10 +2,6 @@ Returns if `key` exists. The user should be aware that if the same existing key is mentioned in the arguments multiple times, it will be counted multiple times. So if `somekey` exists, `EXISTS somekey somekey` will return 2. -@return - -@integer-reply, specifically the number of keys that exist from those specified as arguments. - @examples ```cli diff --git a/commands/expire.md b/commands/expire.md index ddb4c9ad12..7aca62f81f 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -60,13 +60,6 @@ are now fixed. `EXPIRE` would return 0 and not alter the timeout for a key with a timeout set. 
-@return - -@integer-reply, specifically: - -* `1` if the timeout was set. -* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. - @examples ```cli diff --git a/commands/expireat.md b/commands/expireat.md index cbc10c6770..fed6bff7a8 100644 --- a/commands/expireat.md +++ b/commands/expireat.md @@ -27,13 +27,6 @@ The `EXPIREAT` command supports a set of options: A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. The `GT`, `LT` and `NX` options are mutually exclusive. -@return - -@integer-reply, specifically: - -* `1` if the timeout was set. -* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. - @examples ```cli diff --git a/commands/expiretime.md b/commands/expiretime.md index b524dcc34f..afcd37b8b9 100644 --- a/commands/expiretime.md +++ b/commands/expiretime.md @@ -2,13 +2,6 @@ Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which See also the `PEXPIRETIME` command which returns the same information with milliseconds resolution. -@return - -@integer-reply: Expiration Unix timestamp in seconds, or a negative value in order to signal an error (see the description below). - -* The command returns `-1` if the key exists but has no associated expiration time. -* The command returns `-2` if the key does not exist. - @examples ```cli diff --git a/commands/failover.md b/commands/failover.md index 719d19943b..dd4ce43399 100644 --- a/commands/failover.md +++ b/commands/failover.md @@ -42,7 +42,3 @@ The command has no side effects if issued in the `waiting-for-sync` state but ca If a multi-master scenario is encountered, you will need to manually identify which master has the latest data and designate it as the master and have the other replicas. NOTE: `REPLICAOF` is disabled while a failover is in progress, this is to prevent unintended interactions with the failover that might cause data loss. 
- -@return - -@simple-string-reply: `OK` if the command was accepted and a coordinated failover is in progress. An error if the operation cannot be executed. diff --git a/commands/flushall.md b/commands/flushall.md index 5a562d0cf4..68ec53c0b5 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -11,10 +11,6 @@ It is possible to use one of the following modifiers to dictate the flushing mod Note: an asynchronous `FLUSHALL` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. -@return - -@simple-string-reply - ## Behavior change history * `>= 6.2.0`: Default flush behavior now configurable by the **lazyfree-lazy-user-flush** configuration directive. \ No newline at end of file diff --git a/commands/flushdb.md b/commands/flushdb.md index f8235639b2..112a9db3eb 100644 --- a/commands/flushdb.md +++ b/commands/flushdb.md @@ -11,10 +11,6 @@ It is possible to use one of the following modifiers to dictate the flushing mod Note: an asynchronous `FLUSHDB` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. -@return - -@simple-string-reply - ## Behavior change history * `>= 6.2.0`: Default flush behavior now configurable by the **lazyfree-lazy-user-flush** configuration directive. \ No newline at end of file diff --git a/commands/function-delete.md b/commands/function-delete.md index 5b90f81733..557ce4d1c3 100644 --- a/commands/function-delete.md +++ b/commands/function-delete.md @@ -5,15 +5,11 @@ If the library doesn't exist, the server returns an error. For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). 
-@return - -@simple-string-reply - @examples ``` -redis> FUNCTION LOAD Lua mylib "redis.register_function('myfunc', function(keys, args) return 'hello' end)" -OK +redis> FUNCTION LOAD "#!lua name=mylib \n redis.register_function('myfunc', function(keys, args) return 'hello' end)" +"mylib" redis> FCALL myfunc 0 "hello" redis> FUNCTION DELETE mylib diff --git a/commands/function-dump.md b/commands/function-dump.md index cf144bc0ad..167001c3cb 100644 --- a/commands/function-dump.md +++ b/commands/function-dump.md @@ -3,32 +3,30 @@ You can restore the serialized payload later with the `FUNCTION RESTORE` command For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). -@return - -@bulk-string-reply: the serialized payload - @examples The following example shows how to dump loaded libraries using `FUNCTION DUMP` and then it calls `FUNCTION FLUSH` deletes all the libraries. Then, it restores the original libraries from the serialized payload with `FUNCTION RESTORE`. 
``` +redis> FUNCTION LOAD "#!lua name=mylib \n redis.register_function('myfunc', function(keys, args) return args[1] end)" +"mylib" redis> FUNCTION DUMP -"\xf6\x05mylib\x03LUA\x00\xc3@D@J\x1aredis.register_function('my@\x0b\x02', @\x06`\x12\x11keys, args) return`\x0c\a[1] end)\n\x00@\n)\x11\xc8|\x9b\xe4" +"\xf5\xc3@X@]\x1f#!lua name=mylib \n redis.registe\rr_function('my@\x0b\x02', @\x06`\x12\nkeys, args) 6\x03turn`\x0c\a[1] end)\x0c\x00\xba\x98\xc2\xa2\x13\x0e$\a" redis> FUNCTION FLUSH OK -redis> FUNCTION RESTORE "\xf6\x05mylib\x03LUA\x00\xc3@D@J\x1aredis.register_function('my@\x0b\x02', @\x06`\x12\x11keys, args) return`\x0c\a[1] end)\n\x00@\n)\x11\xc8|\x9b\xe4" +redis> FUNCTION RESTORE "\xf5\xc3@X@]\x1f#!lua name=mylib \n redis.registe\rr_function('my@\x0b\x02', @\x06`\x12\nkeys, args) 6\x03turn`\x0c\a[1] end)\x0c\x00\xba\x98\xc2\xa2\x13\x0e$\a" OK redis> FUNCTION LIST 1) 1) "library_name" 2) "mylib" 3) "engine" 4) "LUA" - 5) "description" - 6) (nil) - 7) "functions" - 8) 1) 1) "name" + 5) "functions" + 6) 1) 1) "name" 2) "myfunc" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) ``` diff --git a/commands/function-flush.md b/commands/function-flush.md index 38c412a19a..7d9a2836a0 100644 --- a/commands/function-flush.md +++ b/commands/function-flush.md @@ -6,7 +6,3 @@ Unless called with the optional mode argument, the `lazyfree-lazy-user-flush` co * `!SYNC`: Synchronously flush the libraries. For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). - -@return - -@simple-string-reply diff --git a/commands/function-help.md b/commands/function-help.md index 38c300d590..9190a9b082 100644 --- a/commands/function-help.md +++ b/commands/function-help.md @@ -1,5 +1 @@ The `FUNCTION HELP` command returns a helpful text describing the different subcommands. 
- -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/function-kill.md b/commands/function-kill.md index 2db5ea58bc..1a61c9eb91 100644 --- a/commands/function-kill.md +++ b/commands/function-kill.md @@ -4,7 +4,3 @@ Kill a function that is currently executing. The `FUNCTION KILL` command can be used only on functions that did not modify the dataset during their execution (since stopping a read-only function does not violate the scripting engine's guaranteed atomicity). For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). - -@return - -@simple-string-reply diff --git a/commands/function-list.md b/commands/function-list.md index bb66dba4ed..7cd3853d7f 100644 --- a/commands/function-list.md +++ b/commands/function-list.md @@ -15,7 +15,3 @@ The following information is provided for each of the libraries in the response: * **library_code:** the library's source code (when given the `WITHCODE` modifier). For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). - -@return - -@array-reply diff --git a/commands/function-load.md b/commands/function-load.md index 16f125baff..2bb36d3e71 100644 --- a/commands/function-load.md +++ b/commands/function-load.md @@ -20,10 +20,6 @@ The command will return an error in the following circumstances: For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). -@return - -@string - the library name that was loaded - @examples The following example will create a library named `mylib` with a single function, `myfunc`, that returns the first argument it gets. 
diff --git a/commands/function-restore.md b/commands/function-restore.md index 2868d160b6..f50edda58f 100644 --- a/commands/function-restore.md +++ b/commands/function-restore.md @@ -9,7 +9,3 @@ The following policies are allowed: * **REPLACE:** appends the restored libraries to the existing libraries, replacing any existing ones in case of name collisions. Note that this policy doesn't prevent function name collisions, only libraries. For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). - -@return - -@simple-string-reply diff --git a/commands/function-stats.md b/commands/function-stats.md index 005b47b912..1746d0e0e7 100644 --- a/commands/function-stats.md +++ b/commands/function-stats.md @@ -15,7 +15,3 @@ The reply is map with two keys: You can use this command to inspect the invocation of a long-running function and decide whether kill it with the `FUNCTION KILL` command. For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). - -@return - -@array-reply \ No newline at end of file diff --git a/commands/geoadd.md b/commands/geoadd.md index ecdd6e877b..fdc696d277 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -39,13 +39,6 @@ The model assumes that the Earth is a sphere since it uses the Haversine formula The introduced errors are not an issue when used, for example, by social networks and similar applications requiring this type of querying. However, in the worst case, the error may be up to 0.5%, so you may want to consider other systems for error-critical applications. -@return - -@integer-reply, specifically: - -* When used without optional arguments, the number of elements added to the sorted set (excluding score updates). -* If the `CH` option is specified, the number of elements that were changed (added or updated). 
- @examples ```cli diff --git a/commands/geodist.md b/commands/geodist.md index af78cdff71..257d97f7f3 100644 --- a/commands/geodist.md +++ b/commands/geodist.md @@ -13,13 +13,6 @@ The unit must be one of the following, and defaults to meters: The distance is computed assuming that the Earth is a perfect sphere, so errors up to 0.5% are possible in edge cases. -@return - -@bulk-string-reply, specifically: - -The command returns the distance as a double (represented as a string) -in the specified unit, or NULL if one or both the elements are missing. - @examples ```cli diff --git a/commands/geohash.md b/commands/geohash.md index a99ade2f43..0d43056747 100644 --- a/commands/geohash.md +++ b/commands/geohash.md @@ -18,13 +18,6 @@ have the following properties: 2. It is possible to use them in `geohash.org` URLs such as `http://geohash.org/`. This is an [example of such URL](http://geohash.org/sqdtr74hyu0). 3. Strings with a similar prefix are nearby, but the contrary is not true, it is possible that strings with different prefixes are nearby too. -@return - -@array-reply, specifically: - -The command returns an array where each element is the Geohash corresponding to -each member name passed as argument to the command. - @examples ```cli diff --git a/commands/geopos.md b/commands/geopos.md index 19dd377f1c..c23a879978 100644 --- a/commands/geopos.md +++ b/commands/geopos.md @@ -4,16 +4,6 @@ Given a sorted set representing a geospatial index, populated using the `GEOADD` The command can accept a variable number of arguments so it always returns an array of positions even when a single element is specified. -@return - -@array-reply, specifically: - -The command returns an array where each element is a two elements array -representing longitude and latitude (x,y) of each member name passed as -argument to the command. - -Non existing elements are reported as NULL elements of the array. 
- @examples ```cli diff --git a/commands/georadius.md b/commands/georadius.md index 3d0bba4688..27e9fdc78b 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -33,23 +33,6 @@ By default the command returns the items to the client. It is possible to store * `!STORE`: Store the items in a sorted set populated with their geospatial information. * `!STOREDIST`: Store the items in a sorted set populated with their distance from the center as a floating point number, in the same unit specified in the radius. -@return - -@array-reply, specifically: - -* Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. -* If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. - -When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other information is returned in the following order as successive elements of the sub-array. - -1. The distance from the center as a floating point number, in the same unit specified in the radius. -2. The geohash integer. -3. The coordinates as a two items x,y array (longitude,latitude). - -So for example the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` will return each item in the following way: - - ["Palermo","190.4424",["13.361389338970184","38.115556395496299"]] - ## Read-only variants Since `GEORADIUS` and `GEORADIUSBYMEMBER` have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. For this reason read-only replicas will flag them, and Redis Cluster replicas will redirect them to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster). 
diff --git a/commands/georadius_ro.md b/commands/georadius_ro.md index d2e3399b9c..df6c42b147 100644 --- a/commands/georadius_ro.md +++ b/commands/georadius_ro.md @@ -1,7 +1,3 @@ Read-only variant of the `GEORADIUS` command. This command is identical to the `GEORADIUS` command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. - -@return - -@array-reply: An array with each entry being the corresponding result of the subcommand given at the same position. diff --git a/commands/geosearch.md b/commands/geosearch.md index 972c1c984d..b094a93bce 100644 --- a/commands/geosearch.md +++ b/commands/geosearch.md @@ -28,19 +28,6 @@ When the `ANY` option is used, the command returns as soon as enough matches are When `ANY` is not provided, the command will perform an effort that is proportional to the number of items matching the specified area and sort them, so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. -@return - -@array-reply, specifically: - -* Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. -* If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. - -When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other information is returned in the following order as successive elements of the sub-array. - -1. The distance from the center as a floating point number, in the same unit specified in the shape. -2. The geohash integer. -3. The coordinates as a two items x,y array (longitude,latitude). 
- @examples ```cli diff --git a/commands/geosearchstore.md b/commands/geosearchstore.md index 2a4fc38d15..b27d40125a 100644 --- a/commands/geosearchstore.md +++ b/commands/geosearchstore.md @@ -1,15 +1,11 @@ This command is like `GEOSEARCH`, but stores the result in destination key. -This command comes in place of the now deprecated `GEORADIUS` and `GEORADIUSBYMEMBER`. +This command replaces the now deprecated `GEORADIUS` and `GEORADIUSBYMEMBER`. By default, it stores the results in the `destination` sorted set with their geospatial information. When using the `STOREDIST` option, the command stores the items in a sorted set populated with their distance from the center of the circle or box, as a floating-point number, in the same unit specified for that shape. -@return - -@integer-reply: the number of elements in the resulting set. - @examples ```cli @@ -19,4 +15,4 @@ GEOSEARCHSTORE key1 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 GEOSEARCH key1 FROMLONLAT 15 37 BYBOX 400 400 km ASC WITHCOORD WITHDIST WITHHASH GEOSEARCHSTORE key2 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 STOREDIST ZRANGE key2 0 -1 WITHSCORES -``` \ No newline at end of file +``` diff --git a/commands/get.md b/commands/get.md index 20a3feb4cb..2706dec616 100644 --- a/commands/get.md +++ b/commands/get.md @@ -3,10 +3,6 @@ If the key does not exist the special value `nil` is returned. An error is returned if the value stored at `key` is not a string, because `GET` only handles string values. -@return - -@bulk-string-reply: the value of `key`, or `nil` when `key` does not exist. 
- @examples ```cli @@ -14,3 +10,7 @@ GET nonexisting SET mykey "Hello" GET mykey ``` + +### Code examples + +{{< clients-example set_and_get />}} diff --git a/commands/getbit.md b/commands/getbit.md index 1506af304a..ec0d7414cd 100644 --- a/commands/getbit.md +++ b/commands/getbit.md @@ -6,10 +6,6 @@ When _key_ does not exist it is assumed to be an empty string, so _offset_ is always out of range and the value is also assumed to be a contiguous space with 0 bits. -@return - -@integer-reply: the bit value stored at _offset_. - @examples ```cli diff --git a/commands/getdel.md b/commands/getdel.md index 8474e93105..68867d3490 100644 --- a/commands/getdel.md +++ b/commands/getdel.md @@ -1,10 +1,6 @@ Get the value of `key` and delete the key. This command is similar to `GET`, except for the fact that it also deletes the key on success (if and only if the key's value type is a string). -@return - -@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an error if the key's value type isn't a string. - @examples ```cli diff --git a/commands/getex.md b/commands/getex.md index 89ce809de6..4b11b47384 100644 --- a/commands/getex.md +++ b/commands/getex.md @@ -11,10 +11,6 @@ The `GETEX` command supports a set of options that modify its behavior: * `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. * `PERSIST` -- Remove the time to live associated with the key. -@return - -@bulk-string-reply: the value of `key`, or `nil` when `key` does not exist. - @examples ```cli diff --git a/commands/getrange.md b/commands/getrange.md index 7283defc49..c188f95494 100644 --- a/commands/getrange.md +++ b/commands/getrange.md @@ -7,10 +7,6 @@ So -1 means the last character, -2 the penultimate and so forth. The function handles out of range requests by limiting the resulting range to the actual length of the string. 
-@return - -@bulk-string-reply - @examples ```cli diff --git a/commands/getset.md b/commands/getset.md index dd7aee765c..ba3f82ac51 100644 --- a/commands/getset.md +++ b/commands/getset.md @@ -17,10 +17,6 @@ GETSET mycounter "0" GET mycounter ``` -@return - -@bulk-string-reply: the old value stored at `key`, or `nil` when `key` did not exist. - @examples ```cli diff --git a/commands/hdel.md b/commands/hdel.md index ab6874e1ef..b0dceb2baf 100644 --- a/commands/hdel.md +++ b/commands/hdel.md @@ -3,11 +3,6 @@ Specified fields that do not exist within this hash are ignored. If `key` does not exist, it is treated as an empty hash and this command returns `0`. -@return - -@integer-reply: the number of fields that were removed from the hash, not -including specified but non existing fields. - @examples ```cli diff --git a/commands/hello.md b/commands/hello.md index 3eb6597389..92c6604214 100644 --- a/commands/hello.md +++ b/commands/hello.md @@ -7,7 +7,7 @@ when the connection is in this mode, Redis is able to reply with more semantical replies: for instance, `HGETALL` will return a *map type*, so a client library implementation no longer requires to know in advance to translate the array into a hash before returning it to the caller. For a full coverage of RESP3, please -[check this repository](https://github.com/antirez/resp3). +check the [RESP3 specification](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md). In Redis 6 connections start in RESP2 mode, so clients implementing RESP2 do not need to updated or changed. There are no short term plans to drop support for @@ -55,7 +55,3 @@ protocol to the specified version and also accepts the following options: * `AUTH `: directly authenticate the connection in addition to switching to the specified protocol version. This makes calling `AUTH` before `HELLO` unnecessary when setting up a new connection. 
Note that the `username` can be set to "default" to authenticate against a server that does not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to version 6. * `SETNAME `: this is the equivalent of calling `CLIENT SETNAME`. - -@return - -@array-reply: a list of server properties. The reply is a map instead of an array when RESP3 is selected. The command returns an error if the `protover` requested does not exist. diff --git a/commands/hexists.md b/commands/hexists.md index f27678a67a..b63b63f443 100644 --- a/commands/hexists.md +++ b/commands/hexists.md @@ -1,12 +1,5 @@ Returns if `field` is an existing field in the hash stored at `key`. -@return - -@integer-reply, specifically: - -* `1` if the hash contains `field`. -* `0` if the hash does not contain `field`, or `key` does not exist. - @examples ```cli diff --git a/commands/hget.md b/commands/hget.md index b8d91016eb..d6bef72f81 100644 --- a/commands/hget.md +++ b/commands/hget.md @@ -1,10 +1,5 @@ Returns the value associated with `field` in the hash stored at `key`. -@return - -@bulk-string-reply: the value associated with `field`, or `nil` when `field` is not -present in the hash or `key` does not exist. - @examples ```cli diff --git a/commands/hgetall.md b/commands/hgetall.md index 3717f001db..4fbd625f84 100644 --- a/commands/hgetall.md +++ b/commands/hgetall.md @@ -2,11 +2,6 @@ Returns all fields and values of the hash stored at `key`. In the returned value, every field name is followed by its value, so the length of the reply is twice the size of the hash. -@return - -@array-reply: list of fields and their values stored in the hash, or an -empty list when `key` does not exist. - @examples ```cli diff --git a/commands/hincrby.md b/commands/hincrby.md index 3d24c254d8..c2f1b63960 100644 --- a/commands/hincrby.md +++ b/commands/hincrby.md @@ -6,10 +6,6 @@ performed. The range of values supported by `HINCRBY` is limited to 64 bit signed integers. 
-@return - -@integer-reply: the value at `field` after the increment operation. - @examples Since the `increment` argument is signed, both increment and decrement diff --git a/commands/hincrbyfloat.md b/commands/hincrbyfloat.md index d6eb472597..f83d7d124d 100644 --- a/commands/hincrbyfloat.md +++ b/commands/hincrbyfloat.md @@ -4,7 +4,7 @@ is negative, the result is to have the hash field value **decremented** instead If the field does not exist, it is set to `0` before performing the operation. An error is returned if one of the following conditions occur: -* The field contains a value of the wrong type (not a string). +* The key contains a value of the wrong type (not a hash). * The current field content or the specified increment are not parsable as a double precision floating point number. @@ -12,10 +12,6 @@ The exact behavior of this command is identical to the one of the `INCRBYFLOAT` command, please refer to the documentation of `INCRBYFLOAT` for further information. -@return - -@bulk-string-reply: the value of `field` after the increment. - @examples ```cli diff --git a/commands/hkeys.md b/commands/hkeys.md index c74b01e0f6..945b8f6204 100644 --- a/commands/hkeys.md +++ b/commands/hkeys.md @@ -1,10 +1,5 @@ Returns all field names in the hash stored at `key`. -@return - -@array-reply: list of fields in the hash, or an empty list when `key` does -not exist. - @examples ```cli diff --git a/commands/hlen.md b/commands/hlen.md index 2c18193435..ab19a35656 100644 --- a/commands/hlen.md +++ b/commands/hlen.md @@ -1,9 +1,5 @@ Returns the number of fields contained in the hash stored at `key`. -@return - -@integer-reply: number of fields in the hash, or `0` when `key` does not exist. - @examples ```cli diff --git a/commands/hmget.md b/commands/hmget.md index b10c43b3fa..ff322a15d8 100644 --- a/commands/hmget.md +++ b/commands/hmget.md @@ -5,11 +5,6 @@ For every `field` that does not exist in the hash, a `nil` value is returned. 
Because non-existing keys are treated as empty hashes, running `HMGET` against a non-existing `key` will return a list of `nil` values. -@return - -@array-reply: list of values associated with the given fields, in the same -order as they are requested. - ```cli HSET myhash field1 "Hello" HSET myhash field2 "World" diff --git a/commands/hmset.md b/commands/hmset.md index 8cec77585e..b89a13fc5e 100644 --- a/commands/hmset.md +++ b/commands/hmset.md @@ -3,10 +3,6 @@ Sets the specified fields to their respective values in the hash stored at This command overwrites any specified fields already existing in the hash. If `key` does not exist, a new key holding a hash is created. -@return - -@simple-string-reply - @examples ```cli diff --git a/commands/hrandfield.md b/commands/hrandfield.md index 389a1095ff..e019e8a946 100644 --- a/commands/hrandfield.md +++ b/commands/hrandfield.md @@ -8,17 +8,10 @@ In this case, the number of returned fields is the absolute value of the specifi The optional `WITHVALUES` modifier changes the reply so it includes the respective values of the randomly selected hash fields. -@return - -@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected field, or `nil` when `key` does not exist. - -@array-reply: when the additional `count` argument is passed, the command returns an array of fields, or an empty array when `key` does not exist. -If the `WITHVALUES` modifier is used, the reply is a list fields and their values from the hash. - @examples ```cli -HMSET coin heads obverse tails reverse edge null +HSET coin heads obverse tails reverse edge null HRANDFIELD coin HRANDFIELD coin HRANDFIELD coin -5 WITHVALUES diff --git a/commands/hset.md b/commands/hset.md index 42e15c127d..92c34f3d30 100644 --- a/commands/hset.md +++ b/commands/hset.md @@ -1,14 +1,15 @@ -Sets `field` in the hash stored at `key` to `value`. -If `key` does not exist, a new key holding a hash is created. 
-If `field` already exists in the hash, it is overwritten. +Sets the specified fields to their respective values in the hash stored at `key`. -@return - -@integer-reply: The number of fields that were added. +This command overwrites the values of specified fields that exist in the hash. +If `key` doesn't exist, a new key holding a hash is created. @examples ```cli HSET myhash field1 "Hello" HGET myhash field1 +HSET myhash field2 "Hi" field3 "World" +HGET myhash field2 +HGET myhash field3 +HGETALL myhash ``` diff --git a/commands/hsetnx.md b/commands/hsetnx.md index c60eaa071b..cc2dbdc020 100644 --- a/commands/hsetnx.md +++ b/commands/hsetnx.md @@ -3,13 +3,6 @@ yet exist. If `key` does not exist, a new key holding a hash is created. If `field` already exists, this operation has no effect. -@return - -@integer-reply, specifically: - -* `1` if `field` is a new field in the hash and `value` was set. -* `0` if `field` already exists in the hash and no operation was performed. - @examples ```cli diff --git a/commands/hstrlen.md b/commands/hstrlen.md index b187f75fb7..e473ecffeb 100644 --- a/commands/hstrlen.md +++ b/commands/hstrlen.md @@ -1,13 +1,9 @@ Returns the string length of the value associated with `field` in the hash stored at `key`. If the `key` or the `field` do not exist, 0 is returned. -@return - -@integer-reply: the string length of the value associated with `field`, or zero when `field` is not present in the hash or `key` does not exist at all. - @examples ```cli -HMSET myhash f1 HelloWorld f2 99 f3 -256 +HSET myhash f1 HelloWorld f2 99 f3 -256 HSTRLEN myhash f1 HSTRLEN myhash f2 HSTRLEN myhash f3 diff --git a/commands/hvals.md b/commands/hvals.md index 5526959276..f54f780519 100644 --- a/commands/hvals.md +++ b/commands/hvals.md @@ -1,10 +1,5 @@ Returns all values in the hash stored at `key`. -@return - -@array-reply: list of values in the hash, or an empty list when `key` does -not exist. 
- @examples ```cli diff --git a/commands/incr.md b/commands/incr.md index 6abee168b4..e8aae005d0 100644 --- a/commands/incr.md +++ b/commands/incr.md @@ -13,10 +13,6 @@ Redis stores integers in their integer representation, so for string values that actually hold an integer, there is no overhead for storing the string representation of the integer. -@return - -@integer-reply: the value of `key` after the increment - @examples ```cli @@ -82,7 +78,7 @@ END ``` Basically we have a counter for every IP, for every different second. -But this counters are always incremented setting an expire of 10 seconds so that +But these counters are always incremented setting an expire of 10 seconds so that they'll be removed by Redis automatically when the current second is a different one. diff --git a/commands/incrby.md b/commands/incrby.md index 9734351e80..d67a2dae54 100644 --- a/commands/incrby.md +++ b/commands/incrby.md @@ -6,10 +6,6 @@ This operation is limited to 64 bit signed integers. See `INCR` for extra information on increment/decrement operations. -@return - -@integer-reply: the value of `key` after the increment - @examples ```cli diff --git a/commands/incrbyfloat.md b/commands/incrbyfloat.md index 9efca1d9f7..d44bec435d 100644 --- a/commands/incrbyfloat.md +++ b/commands/incrbyfloat.md @@ -23,10 +23,6 @@ Trailing zeroes are always removed. The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. -@return - -@bulk-string-reply: the value of `key` after the increment. 
- @examples ```cli diff --git a/commands/info.md index be9b318593..117a5da638 100644 --- a/commands/info.md +++ b/commands/info.md @@ -12,10 +12,10 @@ The optional parameter can be used to select a specific section of information: * `cpu`: CPU consumption statistics * `commandstats`: Redis command statistics * `latencystats`: Redis command latency percentile distribution statistics +* `sentinel`: Redis Sentinel section (only applicable to Sentinel instances) * `cluster`: Redis Cluster section * `modules`: Modules section * `keyspace`: Database related statistics -* `modules`: Module related sections * `errorstats`: Redis error statistics It can also take the following values: @@ -26,13 +26,6 @@ It can also take the following values: When no parameter is provided, the `default` option is assumed. -@return - -@bulk-string-reply: as a collection of text lines. - -Lines can contain a section name (starting with a # character) or a property. -All the properties are in the form of `field:value` terminated by `\r\n`. - ```cli INFO ``` @@ -90,7 +83,12 @@ Here is the meaning of all fields in the **clients** section: * `blocked_clients`: Number of clients pending on a blocking call (`BLPOP`, `BRPOP`, `BRPOPLPUSH`, `BLMOVE`, `BZPOPMIN`, `BZPOPMAX`) * `tracking_clients`: Number of clients being tracked (`CLIENT TRACKING`) +* `pubsub_clients`: Number of clients in pubsub mode (`SUBSCRIBE`, `PSUBSCRIBE`, `SSUBSCRIBE`). Added in Redis 8.0 +* `watching_clients`: Number of clients in watching mode (`WATCH`). Added in Redis 8.0 * `clients_in_timeout_table`: Number of clients in the clients timeout table +* `total_watched_keys`: Number of watched keys. Added in Redis 8.0. +* `total_blocking_keys`: Number of blocking keys. Added in Redis 7.2. +* `total_blocking_keys_on_nokey`: Number of blocking keys that one or more clients would like to be unblocked from when the key is deleted. Added in Redis 7.2.
Here is the meaning of all fields in the **memory** section: @@ -116,9 +114,18 @@ Here is the meaning of all fields in the **memory** section: the net memory usage (`used_memory` minus `used_memory_startup`) * `total_system_memory`: The total amount of memory that the Redis host has * `total_system_memory_human`: Human readable representation of previous value -* `used_memory_lua`: Number of bytes used by the Lua engine -* `used_memory_lua_human`: Human readable representation of previous value -* `used_memory_scripts`: Number of bytes used by cached Lua scripts +* `used_memory_lua`: Number of bytes used by the Lua engine for EVAL scripts. Deprecated in Redis 7.0, renamed to `used_memory_vm_eval` +* `used_memory_vm_eval`: Number of bytes used by the script VM engines for EVAL framework (not part of used_memory). Added in Redis 7.0 +* `used_memory_lua_human`: Human readable representation of previous value. Deprecated in Redis 7.0 +* `used_memory_scripts_eval`: Number of bytes overhead by the EVAL scripts (part of used_memory). Added in Redis 7.0 +* `number_of_cached_scripts`: The number of EVAL scripts cached by the server. Added in Redis 7.0 +* `number_of_functions`: The number of functions. Added in Redis 7.0 +* `number_of_libraries`: The number of libraries. Added in Redis 7.0 +* `used_memory_vm_functions`: Number of bytes used by the script VM engines for Functions framework (not part of used_memory). Added in Redis 7.0 +* `used_memory_vm_total`: `used_memory_vm_eval` + `used_memory_vm_functions` (not part of used_memory). Added in Redis 7.0 +* `used_memory_vm_total_human`: Human readable representation of previous value. +* `used_memory_functions`: Number of bytes overhead by Function scripts (part of used_memory). Added in Redis 7.0 +* `used_memory_scripts`: `used_memory_scripts_eval` + `used_memory_functions` (part of used_memory). 
Added in Redis 7.0 * `used_memory_scripts_human`: Human readable representation of previous value * `maxmemory`: The value of the `maxmemory` configuration directive * `maxmemory_human`: Human readable representation of previous value @@ -137,6 +144,7 @@ Here is the meaning of all fields in the **memory** section: * `allocator_allocated`: Total bytes allocated form the allocator, including internal-fragmentation. Normally the same as `used_memory`. * `allocator_active`: Total bytes in the allocator active pages, this includes external-fragmentation. * `allocator_resident`: Total bytes resident (RSS) in the allocator, this includes pages that can be released to the OS (by `MEMORY PURGE`, or just waiting). +* `allocator_muzzy`: Total bytes of 'muzzy' memory (RSS) in the allocator. Muzzy memory is memory that has been freed, but not yet fully returned to the operating system. It can be reused immediately when needed or reclaimed by the OS when system pressure increases. * `mem_not_counted_for_evict`: Used memory that's not counted for key eviction. This is basically transient replica and AOF buffers. * `mem_clients_slaves`: Memory used by replica clients - Starting Redis 7.0, replica buffers share memory with the replication backlog, so this field can show 0 when replicas don't trigger an increase of memory usage. * `mem_clients_normal`: Memory used by normal clients @@ -145,6 +153,7 @@ Here is the meaning of all fields in the **memory** section: * `mem_replication_backlog`: Memory used by replication backlog * `mem_total_replication_buffers`: Total memory consumed for replication buffers - Added in Redis 7.0. * `mem_allocator`: Memory allocator, chosen at compile time. +* `mem_overhead_db_hashtable_rehashing`: Temporary memory overhead of database dictionaries currently being rehashed - Added in 8.0. * `active_defrag_running`: When `activedefrag` is enabled, this indicates whether defragmentation is currently active, and the CPU percentage it intends to utilize. 
* `lazyfree_pending_objects`: The number of objects waiting to be freed (as a result of calling `UNLINK`, or `FLUSHDB` and `FLUSHALL` with the **ASYNC** @@ -194,7 +203,7 @@ Here is the meaning of all fields in the **persistence** section: if any * `rdb_last_cow_size`: The size in bytes of copy-on-write memory during the last RDB save operation -* `rdb_last_load_keys_expired`: Number volatile keys deleted during the last RDB loading. Added in Redis 7.0. +* `rdb_last_load_keys_expired`: Number of volatile keys deleted during the last RDB loading. Added in Redis 7.0. * `rdb_last_load_keys_loaded`: Number of keys loaded during the last RDB loading. Added in Redis 7.0. * `aof_enabled`: Flag indicating AOF logging is activated * `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is @@ -264,9 +273,10 @@ Here is the meaning of all fields in the **stats** section: * `expired_keys`: Total number of key expiration events * `expired_stale_perc`: The percentage of keys probably expired * `expired_time_cap_reached_count`: The count of times that active expiry cycles have stopped early -* `expire_cycle_cpu_milliseconds`: The cumulative amount of time spend on active expiry cycles +* `expire_cycle_cpu_milliseconds`: The cumulative amount of time spent on active expiry cycles * `evicted_keys`: Number of evicted keys due to `maxmemory` limit * `evicted_clients`: Number of evicted clients due to `maxmemory-clients` limit. Added in Redis 7.0. +* `evicted_scripts`: Number of evicted EVAL scripts due to LRU policy, see `EVAL` for more details. Added in Redis 8.0. 
* `total_eviction_exceeded_time`: Total time `used_memory` was greater than `maxmemory` since server startup, in milliseconds * `current_eviction_exceeded_time`: The time passed since `used_memory` last rose above `maxmemory`, in milliseconds * `keyspace_hits`: Number of successful lookup of keys in the main dictionary @@ -305,6 +315,19 @@ Here is the meaning of all fields in the **stats** section: * `total_writes_processed`: Total number of write events processed * `io_threaded_reads_processed`: Number of read events processed by the main and I/O threads * `io_threaded_writes_processed`: Number of write events processed by the main and I/O threads +* `client_query_buffer_limit_disconnections`: Total number of disconnections due to client reaching query buffer limit +* `client_output_buffer_limit_disconnections`: Total number of disconnections due to client reaching output buffer limit +* `reply_buffer_shrinks`: Total number of output buffer shrinks +* `reply_buffer_expands`: Total number of output buffer expands +* `eventloop_cycles`: Total number of eventloop cycles +* `eventloop_duration_sum`: Total time spent in the eventloop in microseconds (including I/O and command processing) +* `eventloop_duration_cmd_sum`: Total time spent on executing commands in microseconds +* `instantaneous_eventloop_cycles_per_sec`: Number of eventloop cycles per second +* `instantaneous_eventloop_duration_usec`: Average time spent in a single eventloop cycle in microseconds +* `acl_access_denied_auth`: Number of authentication failures +* `acl_access_denied_cmd`: Number of commands rejected because of access denied to the command +* `acl_access_denied_key`: Number of commands rejected because of access denied to a key +* `acl_access_denied_channel`: Number of commands rejected because of access denied to a channel Here is the meaning of all fields in the **replication** section: @@ -405,6 +428,15 @@ For each error type, the following line is added: * `errorstat_XXX`: `count=XXX` 
+The **sentinel** section is only available in Redis Sentinel instances. It consists of the following fields: + +* `sentinel_masters`: Number of Redis masters monitored by this Sentinel instance +* `sentinel_tilt`: A value of 1 means this sentinel is in TILT mode +* `sentinel_tilt_since_seconds`: Duration in seconds of current TILT, or -1 if not TILTed. Added in Redis 7.0.0 +* `sentinel_running_scripts`: The number of scripts this Sentinel is currently executing +* `sentinel_scripts_queue_length`: The length of the queue of user scripts that are pending execution +* `sentinel_simulate_failure_flags`: Flags for the `SENTINEL SIMULATE-FAILURE` command + The **cluster** section currently only contains a unique field: * `cluster_enabled`: Indicate Redis cluster is enabled @@ -419,6 +451,14 @@ For each database, the following line is added: * `dbXXX`: `keys=XXX,expires=XXX` +The **debug** section contains experimental metrics, which might change or get removed in future versions. +It won't be included when `INFO` or `INFO ALL` are called, and it is returned only when `INFO DEBUG` is used. + +* `eventloop_duration_aof_sum`: Total time spent on flushing AOF in eventloop in microseconds +* `eventloop_duration_cron_sum`: Total time consumption of cron in microseconds (including serverCron and beforeSleep, but excluding IO and AOF flushing) +* `eventloop_duration_max`: The maximal time spent in a single eventloop cycle in microseconds +* `eventloop_cmd_per_cycle_max`: The maximal number of commands processed in a single eventloop cycle + [hcgcpgp]: http://code.google.com/p/google-perftools/ **A note about the word slave used in this man page**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. 
diff --git a/commands/keys.md index 186caca66c..f51e0ea51a 100644 --- a/commands/keys.md +++ b/commands/keys.md @@ -26,9 +26,12 @@ Supported glob-style patterns: Use `\` to escape special characters if you want to match them verbatim. -@return - -@array-reply: list of keys matching `pattern`. +When using [Redis Cluster](/docs/management/scaling/), the search is optimized for patterns that imply a single slot. +If a pattern can only match keys of one slot, +Redis only iterates over keys in that slot, rather than the whole database, +when searching for keys matching the pattern. +For example, with the pattern `{a}h*llo`, Redis would only try to match it with the keys in slot 15495, which hash tag `{a}` implies. +To use a pattern with a hash tag, see [Hash tags](/docs/reference/cluster-spec/#hash-tags) in the Cluster specification for more information. @examples diff --git a/commands/lastsave.md index cfec6253ab..1e38f6f626 100644 --- a/commands/lastsave.md +++ b/commands/lastsave.md @@ -1,8 +1,4 @@ Return the UNIX TIME of the last DB save executed with success. A client may check if a `BGSAVE` command succeeded reading the `LASTSAVE` value, then issuing a `BGSAVE` command and checking at regular intervals every N -seconds if `LASTSAVE` changed. - -@return - -@integer-reply: an UNIX time stamp. +seconds if `LASTSAVE` changed. Redis considers the database saved successfully at startup. diff --git a/commands/latency-doctor.md index 8f493aaa55..6693eff2f5 100644 --- a/commands/latency-doctor.md +++ b/commands/latency-doctor.md @@ -39,7 +39,3 @@ I have a few advices for you: For more information refer to the [Latency Monitoring Framework page][lm].
[lm]: /topics/latency-monitor - -@return - -@bulk-string-reply diff --git a/commands/latency-graph.md b/commands/latency-graph.md index f1cfa5e996..285b2488ea 100644 --- a/commands/latency-graph.md +++ b/commands/latency-graph.md @@ -58,7 +58,3 @@ in the lower row) is the minimum, and a # in the higher row is the maximum. For more information refer to the [Latency Monitoring Framework page][lm]. [lm]: /topics/latency-monitor - -@return - -@bulk-string-reply \ No newline at end of file diff --git a/commands/latency-help.md b/commands/latency-help.md index 8077bf07d9..59f3999370 100644 --- a/commands/latency-help.md +++ b/commands/latency-help.md @@ -4,7 +4,3 @@ subcommands. For more information refer to the [Latency Monitoring Framework page][lm]. [lm]: /topics/latency-monitor - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/latency-histogram.md b/commands/latency-histogram.md index a97928b614..9d984bc9e4 100644 --- a/commands/latency-histogram.md +++ b/commands/latency-histogram.md @@ -1,20 +1,24 @@ -The `LATENCY HISTOGRAM` command reports a cumulative distribution of latencies in the format of a histogram for each of the specified command names. -If no command names are specified then all commands that contain latency information will be replied. +`LATENCY HISTOGRAM` returns a cumulative distribution of commands' latencies in histogram format. -Each reported histogram has the following fields: +By default, all available latency histograms are returned. +You can filter the reply by providing specific command names. -* Command name. -* The total calls for that command. +Each histogram consists of the following fields: + +* Command name +* The total calls for that command * A map of time buckets: - * Each bucket represents a latency range. - * Each bucket covers twice the previous bucket's range. - * Empty buckets are not printed. - * The tracked latencies are between 1 microsecond and roughly 1 second. 
- * Everything above 1 sec is considered +Inf. - * At max there will be log2(1000000000)=30 buckets. + * Each bucket represents a latency range + * Each bucket covers twice the previous bucket's range + * Empty buckets are excluded from the reply + * The tracked latencies are between 1 microsecond and roughly 1 second + * Everything above 1 second is considered +Inf + * At max, there will be log2(1,000,000,000)=30 buckets + +This command requires the extended latency monitoring feature to be enabled, which is the default. +If you need to enable it, call `CONFIG SET latency-tracking yes`. -This command requires the extended latency monitoring feature to be enabled (by default it's enabled). -If you need to enable it, use `CONFIG SET latency-tracking yes`. +To delete the latency histograms' data use the `CONFIG RESETSTAT` command. @examples @@ -30,9 +34,3 @@ If you need to enable it, use `CONFIG SET latency-tracking yes`. 5# (integer) 16 => (integer) 99968 6# (integer) 33 => (integer) 100000 ``` - -@return - -@array-reply: specifically: - -The command returns a map where each key is a command name, and each value is a map with the total calls, and an inner map of the histogram time buckets. diff --git a/commands/latency-history.md b/commands/latency-history.md index 815e644fff..4207727a1a 100644 --- a/commands/latency-history.md +++ b/commands/latency-history.md @@ -35,10 +35,3 @@ Valid values for `event` are: For more information refer to the [Latency Monitoring Framework page][lm]. [lm]: /topics/latency-monitor - -@return - -@array-reply: specifically: - -The command returns an array where each element is a two elements array -representing the timestamp and the latency of the event. 
\ No newline at end of file diff --git a/commands/latency-latest.md b/commands/latency-latest.md index 918435b8d8..26a1e4d4ed 100644 --- a/commands/latency-latest.md +++ b/commands/latency-latest.md @@ -28,10 +28,3 @@ OK For more information refer to the [Latency Monitoring Framework page][lm]. [lm]: /topics/latency-monitor - -@return - -@array-reply: specifically: - -The command returns an array where each element is a four elements array -representing the event's name, timestamp, latest and all-time latency measurements. diff --git a/commands/latency-reset.md b/commands/latency-reset.md index 9762869b9f..f2d87cc121 100644 --- a/commands/latency-reset.md +++ b/commands/latency-reset.md @@ -28,7 +28,3 @@ Valid values for `event` are: For more information refer to the [Latency Monitoring Framework page][lm]. [lm]: /topics/latency-monitor - -@return - -@integer-reply: the number of event time series that were reset. diff --git a/commands/lcs.md b/commands/lcs.md index b554686263..09e9048534 100644 --- a/commands/lcs.md +++ b/commands/lcs.md @@ -70,10 +70,3 @@ Finally to also have the match len: 3) "len" 4) (integer) 6 ``` - -@return - -* Without modifiers the string representing the longest common substring is returned. -* When `LEN` is given the command returns the length of the longest common substring. -* When `IDX` is given the command returns an array with the LCS length and all the ranges in both the strings, start and end offset for each string, where there are matches. When `WITHMATCHLEN` is given each array representing a match will also have the length of the match (see examples). - diff --git a/commands/lindex.md b/commands/lindex.md index 229c63deab..0f6438e42e 100644 --- a/commands/lindex.md +++ b/commands/lindex.md @@ -7,10 +7,6 @@ Here, `-1` means the last element, `-2` means the penultimate and so forth. When the value at `key` is not a list, an error is returned. 
-@return - -@bulk-string-reply: the requested element, or `nil` when `index` is out of range. - @examples ```cli diff --git a/commands/linsert.md b/commands/linsert.md index 9fe8f6131b..fdbdaf9c8c 100644 --- a/commands/linsert.md +++ b/commands/linsert.md @@ -6,11 +6,6 @@ performed. An error is returned when `key` exists but does not hold a list value. -@return - -@integer-reply: the length of the list after the insert operation, or `-1` when -the value `pivot` was not found. - @examples ```cli diff --git a/commands/llen.md b/commands/llen.md index 8c7c70fac1..4c9a7862ee 100644 --- a/commands/llen.md +++ b/commands/llen.md @@ -2,10 +2,6 @@ Returns the length of the list stored at `key`. If `key` does not exist, it is interpreted as an empty list and `0` is returned. An error is returned when the value stored at `key` is not a list. -@return - -@integer-reply: the length of the list at `key`. - @examples ```cli diff --git a/commands/lmove.md b/commands/lmove.md index ec62ced7ab..7dd02fa4e2 100644 --- a/commands/lmove.md +++ b/commands/lmove.md @@ -18,10 +18,6 @@ no-op if `wherefrom` is the same as `whereto`). This command comes in place of the now deprecated `RPOPLPUSH`. Doing `LMOVE RIGHT LEFT` is equivalent. -@return - -@bulk-string-reply: the element being popped and pushed. - @examples ```cli @@ -64,7 +60,7 @@ all the elements of an N-elements list, one after the other, in O(N) without transferring the full list from the server to the client using a single `LRANGE` operation. -The above pattern works even if the following two conditions: +The above pattern works even in the following conditions: * There are multiple clients rotating the list: they'll fetch different elements, until all the elements of the list are visited, and the process diff --git a/commands/lmpop.md b/commands/lmpop.md index aad5b6a3e6..ee053e2378 100644 --- a/commands/lmpop.md +++ b/commands/lmpop.md @@ -10,13 +10,6 @@ See `BLMPOP` for the blocking variant of this command. 
Elements are popped from either the left or right of the first non-empty list based on the passed argument. The number of returned elements is limited to the lower between the non-empty list's length, and the count argument (which defaults to 1). -@return - -@array-reply: specifically: - -* A `nil` when no element could be popped. -* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of elements. - @examples ```cli diff --git a/commands/lolwut.md b/commands/lolwut.md index a767a381ea..027c9b177e 100644 --- a/commands/lolwut.md +++ b/commands/lolwut.md @@ -23,7 +23,3 @@ LOLWUT version should have the following properties: 3. LOLWUT output should be fast to generate so that the command can be called in production instances without issues. It should remain fast even when the user experiments with odd parameters. 4. LOLWUT implementations should be safe and carefully checked for security, and resist to untrusted inputs if they take arguments. 5. LOLWUT must always display the Redis version at the end. - -@return - -@bulk-string-reply (or verbatim reply when using the RESP3 protocol): the string containing the generative computer art, and a text with the Redis version. diff --git a/commands/lpop.md b/commands/lpop.md index c6e77c2d97..ed7dc9cf4e 100644 --- a/commands/lpop.md +++ b/commands/lpop.md @@ -4,16 +4,6 @@ By default, the command pops a single element from the beginning of the list. When provided with the optional `count` argument, the reply will consist of up to `count` elements, depending on the list's length. -@return - -When called without the `count` argument: - -@bulk-string-reply: the value of the first element, or `nil` when `key` does not exist. - -When called with the `count` argument: - -@array-reply: list of popped elements, or `nil` when `key` does not exist. 
- @examples ```cli diff --git a/commands/lpos.md b/commands/lpos.md index 93fe579447..b4485acb56 100644 --- a/commands/lpos.md +++ b/commands/lpos.md @@ -57,10 +57,6 @@ Finally, the `MAXLEN` option tells the command to compare the provided element o When `MAXLEN` is used, it is possible to specify 0 as the maximum number of comparisons, as a way to tell the command we want unlimited comparisons. This is better than giving a very large `MAXLEN` option because it is more general. -@return - -The command returns the integer representing the matching element, or `nil` if there is no match. However, if the `COUNT` option is given the command returns an array (empty if there are no matches). - @examples ```cli diff --git a/commands/lpush.md b/commands/lpush.md index e8b97203db..c1d198bad9 100644 --- a/commands/lpush.md +++ b/commands/lpush.md @@ -10,10 +10,6 @@ leftmost element to the rightmost element. So for instance the command `LPUSH mylist a b c` will result into a list containing `c` as first element, `b` as second element and `a` as third element. -@return - -@integer-reply: the length of the list after the push operations. - @examples ```cli diff --git a/commands/lpushx.md b/commands/lpushx.md index e98c9037d7..69182df8a8 100644 --- a/commands/lpushx.md +++ b/commands/lpushx.md @@ -3,10 +3,6 @@ already exists and holds a list. In contrary to `LPUSH`, no operation will be performed when `key` does not yet exist. -@return - -@integer-reply: the length of the list after the push operation. - @examples ```cli diff --git a/commands/lrange.md b/commands/lrange.md index 7634f3e6c3..c59de57e1b 100644 --- a/commands/lrange.md +++ b/commands/lrange.md @@ -23,10 +23,6 @@ If `start` is larger than the end of the list, an empty list is returned. If `stop` is larger than the actual end of the list, Redis will treat it like the last element of the list. -@return - -@array-reply: list of elements in the specified range. 
- @examples ```cli diff --git a/commands/lrem.md b/commands/lrem.md index 36c0c7df00..dd2f7e7fbc 100644 --- a/commands/lrem.md +++ b/commands/lrem.md @@ -12,10 +12,6 @@ For example, `LREM list -2 "hello"` will remove the last two occurrences of Note that non-existing keys are treated like empty lists, so when `key` does not exist, the command will always return `0`. -@return - -@integer-reply: the number of removed elements. - @examples ```cli diff --git a/commands/lset.md b/commands/lset.md index 8f1c391594..c6fb635758 100644 --- a/commands/lset.md +++ b/commands/lset.md @@ -3,10 +3,6 @@ For more information on the `index` argument, see `LINDEX`. An error is returned for out of range indexes. -@return - -@simple-string-reply - @examples ```cli diff --git a/commands/ltrim.md b/commands/ltrim.md index 7cae0c7caf..61ae3f6e26 100644 --- a/commands/ltrim.md +++ b/commands/ltrim.md @@ -31,10 +31,6 @@ It is important to note that when used in this way `LTRIM` is an O(1) operation because in the average case just one element is removed from the tail of the list. -@return - -@simple-string-reply - @examples ```cli diff --git a/commands/memory-doctor.md b/commands/memory-doctor.md index dbb9db3e33..8a61604542 100644 --- a/commands/memory-doctor.md +++ b/commands/memory-doctor.md @@ -1,6 +1,2 @@ The `MEMORY DOCTOR` command reports about different memory-related issues that the Redis server experiences, and advises about possible remedies. - -@return - -@bulk-string-reply \ No newline at end of file diff --git a/commands/memory-help.md b/commands/memory-help.md index c0f4086f53..1b86c43a32 100644 --- a/commands/memory-help.md +++ b/commands/memory-help.md @@ -1,6 +1,2 @@ The `MEMORY HELP` command returns a helpful text describing the different subcommands. 
- -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/memory-malloc-stats.md b/commands/memory-malloc-stats.md index 8da8e72e96..f0b645ca38 100644 --- a/commands/memory-malloc-stats.md +++ b/commands/memory-malloc-stats.md @@ -3,7 +3,3 @@ the memory allocator. This command is currently implemented only when using **jemalloc** as an allocator, and evaluates to a benign NOOP for all others. - -@return - -@bulk-string-reply: the memory allocator's internal statistics report diff --git a/commands/memory-purge.md b/commands/memory-purge.md index 5ebe43356d..947a4fea45 100644 --- a/commands/memory-purge.md +++ b/commands/memory-purge.md @@ -3,7 +3,3 @@ reclaimed by the allocator. This command is currently implemented only when using **jemalloc** as an allocator, and evaluates to a benign NOOP for all others. - -@return - -@simple-string-reply diff --git a/commands/memory-stats.md b/commands/memory-stats.md index 39cd68e9f8..3d88350dad 100644 --- a/commands/memory-stats.md +++ b/commands/memory-stats.md @@ -20,27 +20,38 @@ values. The following metrics are reported: * `aof.buffer`: The summed size in bytes of AOF related buffers. * `lua.caches`: the summed size in bytes of the overheads of the Lua scripts' caches +* `functions.caches`: the summed size in bytes of the overheads of the Function scripts' + caches * `dbXXX`: For each of the server's databases, the overheads of the main and expiry dictionaries (`overhead.hashtable.main` and `overhead.hashtable.expires`, respectively) are reported in bytes +* `overhead.db.hashtable.lut`: Total overhead of dictionary buckets in databases (Added in Redis 8.0) +* `overhead.db.hashtable.rehashing`: Temporary memory overhead of database dictionaries currently being rehashed (Added in Redis 8.0) * `overhead.total`: The sum of all overheads, i.e. 
`startup.allocated`, `replication.backlog`, `clients.slaves`, `clients.normal`, `aof.buffer` and those of the internal data structures that are used in managing the Redis keyspace (see `INFO`'s `used_memory_overhead`) +* `db.dict.rehashing.count`: Number of DB dictionaries currently being rehashed (Added in Redis 8.0) * `keys.count`: The total number of keys stored across all databases in the server -* `keys.bytes-per-key`: The ratio between **net memory usage** (`total.allocated` - minus `startup.allocated`) and `keys.count` +* `keys.bytes-per-key`: The ratio between `dataset.bytes` and `keys.count` * `dataset.bytes`: The size in bytes of the dataset, i.e. `overhead.total` subtracted from `total.allocated` (see `INFO`'s `used_memory_dataset`) -* `dataset.percentage`: The percentage of `dataset.bytes` out of the net +* `dataset.percentage`: The percentage of `dataset.bytes` out of the total memory usage -* `peak.percentage`: The percentage of `peak.allocated` out of - `total.allocated` +* `peak.percentage`: The percentage of `total.allocated` out of + `peak.allocated` +* `allocator.allocated`: See `INFO`'s `allocator_allocated` +* `allocator.active`: See `INFO`'s `allocator_active` +* `allocator.resident`: See `INFO`'s `allocator_resident` +* `allocator.muzzy`: See `INFO`'s `allocator_muzzy` +* `allocator-fragmentation.ratio`: See `INFO`'s `allocator_frag_ratio` +* `allocator-fragmentation.bytes`: See `INFO`'s `allocator_frag_bytes` +* `allocator-rss.ratio`: See `INFO`'s `allocator_rss_ratio` +* `allocator-rss.bytes`: See `INFO`'s `allocator_rss_bytes` +* `rss-overhead.ratio`: See `INFO`'s `rss_overhead_ratio` +* `rss-overhead.bytes`: See `INFO`'s `rss_overhead_bytes` * `fragmentation`: See `INFO`'s `mem_fragmentation_ratio` - -@return - -@array-reply: nested list of memory usage metrics and their values +* `fragmentation.bytes`: See `INFO`'s `mem_fragmentation_bytes` **A note about the word slave used in this man page**: Starting with Redis 5, if not for backward 
compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. diff --git a/commands/memory-usage.md b/commands/memory-usage.md index ae5a4bccc1..b436292373 100644 --- a/commands/memory-usage.md +++ b/commands/memory-usage.md @@ -2,39 +2,38 @@ The `MEMORY USAGE` command reports the number of bytes that a key and its value require to be stored in RAM. The reported usage is the total of memory allocations for data and -administrative overheads that a key its value require. +administrative overheads that a key and its value require. For nested data types, the optional `SAMPLES` option can be provided, where -`count` is the number of sampled nested values. By default, this option is set -to `5`. To sample the all of the nested values, use `SAMPLES 0`. +`count` is the number of sampled nested values. The samples are averaged to estimate the total size. +By default, this option is set to `5`. To sample the all of the nested values, use `SAMPLES 0`. @examples -With Redis v4.0.1 64-bit and **jemalloc**, the empty string measures as follows: +With Redis v7.2.0 64-bit and **jemalloc**, the empty string measures as follows: ``` > SET "" "" OK > MEMORY USAGE "" -(integer) 51 +(integer) 56 ``` These bytes are pure overhead at the moment as no actual data is stored, and are -used for maintaining the internal data structures of the server. Longer keys and +used for maintaining the internal data structures of the server (include internal allocator fragmentation). Longer keys and values show asymptotically linear usage. 
``` > SET foo bar OK > MEMORY USAGE foo -(integer) 54 -> SET cento 01234567890123456789012345678901234567890123 -45678901234567890123456789012345678901234567890123456789 +(integer) 56 +> SET foo2 mybar OK -127.0.0.1:6379> MEMORY USAGE cento -(integer) 153 +> MEMORY USAGE foo2 +(integer) 64 +> SET foo3 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 +OK +> MEMORY USAGE foo3 +(integer) 160 ``` - -@return - -@integer-reply: the memory usage in bytes, or `nil` when the key does not exist. diff --git a/commands/mget.md b/commands/mget.md index 8bca6ca81a..97de1cc61d 100644 --- a/commands/mget.md +++ b/commands/mget.md @@ -3,10 +3,6 @@ For every key that does not hold a string value or does not exist, the special value `nil` is returned. Because of this, the operation never fails. -@return - -@array-reply: list of values at the specified keys. - @examples ```cli diff --git a/commands/migrate.md b/commands/migrate.md index 318e0e253f..1bc5913302 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -65,8 +65,3 @@ just a single key exists. * `!KEYS` -- If the key argument is an empty string, the command will instead migrate all the keys that follow the `!KEYS` option (see the above section for more info). * `!AUTH` -- Authenticate with the given password to the remote instance. * `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or greater ACL auth style). - -@return - -@simple-string-reply: The command returns OK on success, or `NOKEY` if no keys were -found in the source instance. diff --git a/commands/module-help.md b/commands/module-help.md index a05bf1e965..6759bfe0fe 100644 --- a/commands/module-help.md +++ b/commands/module-help.md @@ -1,5 +1 @@ The `MODULE HELP` command returns a helpful text describing the different subcommands. 
- -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/module-list.md b/commands/module-list.md index 1bfa3e232b..3f91414b05 100644 --- a/commands/module-list.md +++ b/commands/module-list.md @@ -1,10 +1 @@ Returns information about the modules loaded to the server. - -@return - -@array-reply: list of loaded modules. Each element in the list represents a -module, and is in itself a list of property names and their values. The -following properties is reported for each loaded module: - -* `name`: Name of the module -* `ver`: Version of the module diff --git a/commands/module-load.md b/commands/module-load.md index 99777c3886..7232257a1d 100644 --- a/commands/module-load.md +++ b/commands/module-load.md @@ -7,7 +7,3 @@ unmodified to the module. **Note**: modules can also be loaded at server startup with `loadmodule` configuration directive in `redis.conf`. - -@return - -@simple-string-reply: `OK` if module was loaded. diff --git a/commands/module-loadex.md b/commands/module-loadex.md index f7a55dbd51..a863aae32c 100644 --- a/commands/module-loadex.md +++ b/commands/module-loadex.md @@ -9,7 +9,3 @@ Any additional arguments that follow the `ARGS` keyword are passed unmodified to **Note**: modules can also be loaded at server startup with `loadmodule` configuration directive in `redis.conf`. - -@return - -@simple-string-reply: `OK` if module was loaded. diff --git a/commands/module-unload.md b/commands/module-unload.md index 84ebebf010..4214e750f0 100644 --- a/commands/module-unload.md +++ b/commands/module-unload.md @@ -7,7 +7,3 @@ library's filename. Known limitations: * Modules that register custom data types can not be unloaded. - -@return - -@simple-string-reply: `OK` if module was unloaded. 
diff --git a/commands/monitor.md b/commands/monitor.md index 2622cdd53a..5a7a70c575 100644 --- a/commands/monitor.md +++ b/commands/monitor.md @@ -80,11 +80,6 @@ In this particular case, running a single `MONITOR` client can reduce the throughput by more than 50%. Running more `MONITOR` clients will reduce throughput even more. -@return - -**Non standard return value**, just dumps the received commands in an infinite -flow. - ## Behavior change history * `>= 6.0.0`: `AUTH` excluded from the command's output. diff --git a/commands/move.md b/commands/move.md index ceb212caac..1e9f6cdbf9 100644 --- a/commands/move.md +++ b/commands/move.md @@ -3,10 +3,3 @@ destination database. When `key` already exists in the destination database, or it does not exist in the source database, it does nothing. It is possible to use `MOVE` as a locking primitive because of this. - -@return - -@integer-reply, specifically: - -* `1` if `key` was moved. -* `0` if `key` was not moved. diff --git a/commands/mset.md b/commands/mset.md index f070d29329..d22b0de5da 100644 --- a/commands/mset.md +++ b/commands/mset.md @@ -6,10 +6,6 @@ See `MSETNX` if you don't want to overwrite existing values. It is not possible for clients to see that some of the keys were updated while others are unchanged. -@return - -@simple-string-reply: always `OK` since `MSET` can't fail. - @examples ```cli diff --git a/commands/msetnx.md b/commands/msetnx.md index 795bfc9812..71b4117c30 100644 --- a/commands/msetnx.md +++ b/commands/msetnx.md @@ -10,13 +10,6 @@ that either all the fields or none at all are set. It is not possible for clients to see that some of the keys were updated while others are unchanged. -@return - -@integer-reply, specifically: - -* `1` if the all the keys were set. -* `0` if no key was set (at least one key already existed). 
- @examples ```cli diff --git a/commands/multi.md b/commands/multi.md index dc8789249d..1b2ba22659 100644 --- a/commands/multi.md +++ b/commands/multi.md @@ -2,7 +2,3 @@ Marks the start of a [transaction][tt] block. Subsequent commands will be queued for atomic execution using `EXEC`. [tt]: /topics/transactions - -@return - -@simple-string-reply: always `OK`. diff --git a/commands/object-encoding.md b/commands/object-encoding.md index 3a9583d0d9..685debf912 100644 --- a/commands/object-encoding.md +++ b/commands/object-encoding.md @@ -9,13 +9,34 @@ Redis objects can be encoded in different ways: - `embstr`, an embedded string, which is an object where the internal simple dynamic string, `sds`, is an unmodifiable string allocated in the same chuck as the object itself. `embstr` can be strings with lengths up to the hardcoded limit of `OBJ_ENCODING_EMBSTR_SIZE_LIMIT` or 44 bytes. -* Lists can be encoded as `ziplist` or `linkedlist`. The `ziplist` is the special representation that is used to save space for small lists. -* Sets can be encoded as `intset` or `hashtable`. The `intset` is a special encoding used for small sets composed solely of integers. -* Hashes can be encoded as `ziplist` or `hashtable`. The `ziplist` is a special encoding used for small hashes. -* Sorted Sets can be encoded as `ziplist` or `skiplist` format. As for the List type small sorted sets can be specially encoded using `ziplist`, while the `skiplist` encoding is the one that works with sorted sets of any size. +* Lists can be encoded as: + + - `linkedlist`, simple list encoding. No longer used, an old list encoding. + - `ziplist`, Redis <= 6.2, a space-efficient encoding used for small lists. + - `listpack`, Redis >= 7.0, a space-efficient encoding used for small lists. + - `quicklist`, encoded as linkedlist of ziplists or listpacks. 
-All the specially encoded types are automatically converted to the general type once you perform an operation that makes it impossible for Redis to retain the space saving encoding. +* Sets can be encoded as: + + - `hashtable`, normal set encoding. + - `intset`, a special encoding used for small sets composed solely of integers. + - `listpack`, Redis >= 7.2, a space-efficient encoding used for small sets. + +* Hashes can be encoded as: + + - `zipmap`, no longer used, an old hash encoding. + - `hashtable`, normal hash encoding. + - `ziplist`, Redis <= 6.2, a space-efficient encoding used for small hashes. + - `listpack`, Redis >= 7.0, a space-efficient encoding used for small hashes. -@return +* Sorted Sets can be encoded as: -@bulk-string-reply: the encoding of the object, or `nil` if the key doesn't exist + - `skiplist`, normal sorted set encoding. + - `ziplist`, Redis <= 6.2, a space-efficient encoding used for small sorted sets. + - `listpack`, Redis >= 7.0, a space-efficient encoding used for small sorted sets. + +* Streams can be encoded as: + + - `stream`, encoded as a radix tree of listpacks. + +All the specially encoded types are automatically converted to the general type once you perform an operation that makes it impossible for Redis to retain the space saving encoding. diff --git a/commands/object-freq.md b/commands/object-freq.md index fdf891e83d..5c75bfb787 100644 --- a/commands/object-freq.md +++ b/commands/object-freq.md @@ -1,9 +1,3 @@ This command returns the logarithmic access frequency counter of a Redis object stored at ``. The command is only available when the `maxmemory-policy` configuration directive is set to one of the LFU policies. - -@return - -@integer-reply - -The counter's value. 
\ No newline at end of file diff --git a/commands/object-help.md b/commands/object-help.md index f98196c5e1..d528d40751 100644 --- a/commands/object-help.md +++ b/commands/object-help.md @@ -1,5 +1 @@ The `OBJECT HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/object-idletime.md b/commands/object-idletime.md index 2a89641746..791d8fb00a 100644 --- a/commands/object-idletime.md +++ b/commands/object-idletime.md @@ -1,9 +1,3 @@ This command returns the time in seconds since the last access to the value stored at ``. The command is only available when the `maxmemory-policy` configuration directive is not set to one of the LFU policies. - -@return - -@integer-reply - -The idle time in seconds. \ No newline at end of file diff --git a/commands/object-refcount.md b/commands/object-refcount.md index 639c899dbd..e526681640 100644 --- a/commands/object-refcount.md +++ b/commands/object-refcount.md @@ -1,7 +1 @@ This command returns the reference count of the stored at ``. - -@return - -@integer-reply - -The number of references. \ No newline at end of file diff --git a/commands/persist.md b/commands/persist.md index 67a00147da..44f067d7d8 100644 --- a/commands/persist.md +++ b/commands/persist.md @@ -2,13 +2,6 @@ Remove the existing timeout on `key`, turning the key from _volatile_ (a key with an expire set) to _persistent_ (a key that will never expire as no timeout is associated). -@return - -@integer-reply, specifically: - -* `1` if the timeout was removed. -* `0` if `key` does not exist or does not have an associated timeout. - @examples ```cli diff --git a/commands/pexpire.md b/commands/pexpire.md index bc2e6f109e..2e0df07063 100644 --- a/commands/pexpire.md +++ b/commands/pexpire.md @@ -13,13 +13,6 @@ The `PEXPIRE` command supports a set of options since Redis 7.0: A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. 
The `GT`, `LT` and `NX` options are mutually exclusive. -@return - -@integer-reply, specifically: - -* `1` if the timeout was set. -* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. - @examples ```cli diff --git a/commands/pexpireat.md b/commands/pexpireat.md index 21e2853e39..748d491a72 100644 --- a/commands/pexpireat.md +++ b/commands/pexpireat.md @@ -13,13 +13,6 @@ The `PEXPIREAT` command supports a set of options since Redis 7.0: A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. The `GT`, `LT` and `NX` options are mutually exclusive. -@return - -@integer-reply, specifically: - -* `1` if the timeout was set. -* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. - @examples ```cli diff --git a/commands/pexpiretime.md b/commands/pexpiretime.md index 9fcda95507..ffde6be48b 100644 --- a/commands/pexpiretime.md +++ b/commands/pexpiretime.md @@ -1,12 +1,5 @@ `PEXPIRETIME` has the same semantic as `EXPIRETIME`, but returns the absolute Unix expiration timestamp in milliseconds instead of seconds. -@return - -@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value in order to signal an error (see the description below). - -* The command returns `-1` if the key exists but has no associated expiration time. -* The command returns `-2` if the key does not exist. - @examples ```cli diff --git a/commands/pfadd.md b/commands/pfadd.md index 5d0128b1a0..f621a00d4e 100644 --- a/commands/pfadd.md +++ b/commands/pfadd.md @@ -8,12 +8,6 @@ To call the command without elements but just the variable name is valid, this w For an introduction to HyperLogLog data structure check the `PFCOUNT` command page. -@return - -@integer-reply, specifically: - -* 1 if at least 1 HyperLogLog internal register was altered. 0 otherwise. 
- @examples ```cli diff --git a/commands/pfcount.md b/commands/pfcount.md index 71d10930c0..ac6f712a6a 100644 --- a/commands/pfcount.md +++ b/commands/pfcount.md @@ -11,12 +11,6 @@ For example in order to take the count of all the unique search queries performe Note: as a side effect of calling this function, it is possible that the HyperLogLog is modified, since the last 8 bytes encode the latest computed cardinality for caching purposes. So `PFCOUNT` is technically a write command. -@return - -@integer-reply, specifically: - -* The approximated number of unique elements observed via `PFADD`. - @examples ```cli diff --git a/commands/pfmerge.md b/commands/pfmerge.md index c59c930182..4eb1b90966 100644 --- a/commands/pfmerge.md +++ b/commands/pfmerge.md @@ -9,10 +9,6 @@ If the destination variable exists, it is treated as one of the source sets and its cardinality will be included in the cardinality of the computed HyperLogLog. -@return - -@simple-string-reply: The command just returns `OK`. - @examples ```cli diff --git a/commands/ping.md b/commands/ping.md index c16f760e2f..f1631d4a2c 100644 --- a/commands/ping.md +++ b/commands/ping.md @@ -1,19 +1,15 @@ Returns `PONG` if no argument is provided, otherwise return a copy of the argument as a bulk. -This command is often used to test if a connection is still alive, or to measure -latency. +This command is useful for: +1. Testing whether a connection is still alive. +1. Verifying the server's ability to serve data - an error is returned when this isn't the case (e.g., during load from persistence or accessing a stale replica). +1. Measuring latency. If the client is subscribed to a channel or a pattern, it will instead return a multi-bulk with a "pong" in the first position and an empty bulk in the second position, unless an argument is provided in which case it returns a copy of the argument. -@return - -@simple-string-reply, and specifically `PONG`, when no argument is provided. 
- -@bulk-string-reply the argument provided, when applicable. - @examples ```cli diff --git a/commands/psubscribe.md b/commands/psubscribe.md index 81c4bbafb0..abb80b9975 100644 --- a/commands/psubscribe.md +++ b/commands/psubscribe.md @@ -7,3 +7,12 @@ Supported glob-style patterns: * `h[ae]llo` subscribes to `hello` and `hallo,` but not `hillo` Use `\` to escape special characters if you want to match them verbatim. + +Once the client enters the subscribed state it is not supposed to issue any other commands, except for additional `SUBSCRIBE`, `SSUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, `SUNSUBSCRIBE`, `PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands. +However, if RESP3 is used (see `HELLO`) it is possible for a client to issue any commands while in subscribed state. + +For more information, see [Pub/sub](/docs/interact/pubsub/). + +## Behavior change history + +* `>= 6.2.0`: `RESET` can be called to exit subscribed state. diff --git a/commands/psync.md b/commands/psync.md index 8cbacf2fa6..d9eac2ab9a 100644 --- a/commands/psync.md +++ b/commands/psync.md @@ -7,7 +7,3 @@ For more information about replication in Redis please check the [replication page][tr]. [tr]: /topics/replication - -@return - -**Non standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master. diff --git a/commands/pttl.md b/commands/pttl.md index 4e0807971b..302194b46a 100644 --- a/commands/pttl.md +++ b/commands/pttl.md @@ -9,10 +9,6 @@ Starting with Redis 2.8 the return value in case of error changed: * The command returns `-2` if the key does not exist. * The command returns `-1` if the key exists but has no associated expire. -@return - -@integer-reply: TTL in milliseconds, or a negative value in order to signal an error (see the description above). 
- @examples ```cli diff --git a/commands/publish.md b/commands/publish.md index 62283f8dc1..cd61d04678 100644 --- a/commands/publish.md +++ b/commands/publish.md @@ -3,9 +3,3 @@ Posts a message to the given channel. In a Redis Cluster clients can publish to every node. The cluster makes sure that published messages are forwarded as needed, so clients can subscribe to any channel by connecting to any one of the nodes. - -@return - -@integer-reply: the number of clients that received the message. Note that in a -Redis Cluster, only clients that are connected to the same node as the -publishing client are included in the count. diff --git a/commands/pubsub-channels.md b/commands/pubsub-channels.md index 8b9a06eefd..8e0b3e36fd 100644 --- a/commands/pubsub-channels.md +++ b/commands/pubsub-channels.md @@ -5,7 +5,3 @@ An active channel is a Pub/Sub channel with one or more subscribers (excluding c If no `pattern` is specified, all the channels are listed, otherwise if pattern is specified only channels matching the specified glob-style pattern are listed. Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. - -@return - -@array-reply: a list of active channels, optionally matching the specified pattern. diff --git a/commands/pubsub-help.md b/commands/pubsub-help.md index a7ab2a359f..f711c27db2 100644 --- a/commands/pubsub-help.md +++ b/commands/pubsub-help.md @@ -1,5 +1 @@ The `PUBSUB HELP` command returns a helpful text describing the different subcommands. 
- -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/pubsub-numpat.md b/commands/pubsub-numpat.md index 6f3a7c9e14..2a3282c5c6 100644 --- a/commands/pubsub-numpat.md +++ b/commands/pubsub-numpat.md @@ -3,7 +3,3 @@ Returns the number of unique patterns that are subscribed to by clients (that ar Note that this isn't the count of clients subscribed to patterns, but the total number of unique patterns all the clients are subscribed to. Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. - -@return - -@integer-reply: the number of patterns all the clients are subscribed to. diff --git a/commands/pubsub-numsub.md b/commands/pubsub-numsub.md index d4d6b85e7b..604c317900 100644 --- a/commands/pubsub-numsub.md +++ b/commands/pubsub-numsub.md @@ -3,9 +3,3 @@ Returns the number of subscribers (exclusive of clients subscribed to patterns) Note that it is valid to call this command without channels. In this case it will just return an empty list. Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. - -@return - -@array-reply: a list of channels and number of subscribers for every channel. - -The format is channel, count, channel, count, ..., so the list is flat. The order in which the channels are listed is the same as the order of the channels specified in the command call. 
diff --git a/commands/pubsub-shardchannels.md b/commands/pubsub-shardchannels.md index 543eab0edc..c6c460bd79 100644 --- a/commands/pubsub-shardchannels.md +++ b/commands/pubsub-shardchannels.md @@ -6,15 +6,11 @@ If no `pattern` is specified, all the channels are listed, otherwise if pattern The information returned about the active shard channels are at the shard level and not at the cluster level. -@return - -@array-reply: a list of active channels, optionally matching the specified pattern. - @examples ``` > PUBSUB SHARDCHANNELS 1) "orders" -PUBSUB SHARDCHANNELS o* +> PUBSUB SHARDCHANNELS o* 1) "orders" ``` diff --git a/commands/pubsub-shardnumsub.md b/commands/pubsub-shardnumsub.md index 8d09d4381f..8119fe9b56 100644 --- a/commands/pubsub-shardnumsub.md +++ b/commands/pubsub-shardnumsub.md @@ -4,12 +4,6 @@ Note that it is valid to call this command without channels, in this case it wil Cluster note: in a Redis Cluster, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. -@return - -@array-reply: a list of channels and number of subscribers for every channel. - -The format is channel, count, channel, count, ..., so the list is flat. The order in which the channels are listed is the same as the order of the shard channels specified in the command call. - @examples ``` diff --git a/commands/quit.md b/commands/quit.md index 6be9b55dd8..e0507d43d2 100644 --- a/commands/quit.md +++ b/commands/quit.md @@ -2,6 +2,6 @@ Ask the server to close the connection. The connection is closed as soon as all pending replies have been written to the client. -@return - -@simple-string-reply: always OK. +**Note:** Clients should not use this command. +Instead, clients should simply close the connection when they're not used anymore. +Terminating a connection on the client side is preferable, as it eliminates `TIME_WAIT` lingering sockets on the server side. 
diff --git a/commands/randomkey.md b/commands/randomkey.md index d8233224ff..37a2d759ab 100644 --- a/commands/randomkey.md +++ b/commands/randomkey.md @@ -1,5 +1 @@ Return a random key from the currently selected database. - -@return - -@bulk-string-reply: the random key, or `nil` when the database is empty. diff --git a/commands/readonly.md b/commands/readonly.md index bc73b9b980..9bd4f96a7e 100644 --- a/commands/readonly.md +++ b/commands/readonly.md @@ -13,7 +13,3 @@ master node. This may happen because: 1. The client sent a command about hash slots never served by the master of this replica. 2. The cluster was reconfigured (for example resharded) and the replica is no longer able to serve commands for a given hash slot. - -@return - -@simple-string-reply diff --git a/commands/readwrite.md b/commands/readwrite.md index d6d7089be4..9b50eefd80 100644 --- a/commands/readwrite.md +++ b/commands/readwrite.md @@ -4,7 +4,3 @@ Read queries against a Redis Cluster replica node are disabled by default, but you can use the `READONLY` command to change this behavior on a per- connection basis. The `READWRITE` command resets the readonly mode flag of a connection back to readwrite. - -@return - -@simple-string-reply diff --git a/commands/rename.md b/commands/rename.md index 471ecf4187..82c503c219 100644 --- a/commands/rename.md +++ b/commands/rename.md @@ -4,10 +4,6 @@ If `newkey` already exists it is overwritten, when this happens `RENAME` execute In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, meaning that in practice only keys that have the same hash tag can be reliably renamed in cluster. -@return - -@simple-string-reply - @examples ```cli diff --git a/commands/renamenx.md b/commands/renamenx.md index c132af4a73..15b951e4ca 100644 --- a/commands/renamenx.md +++ b/commands/renamenx.md @@ -3,13 +3,6 @@ It returns an error when `key` does not exist. 
In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, meaning that in practice only keys that have the same hash tag can be reliably renamed in cluster. -@return - -@integer-reply, specifically: - -* `1` if `key` was renamed to `newkey`. -* `0` if `newkey` already exists. - @examples ```cli diff --git a/commands/replicaof.md b/commands/replicaof.md index 1c3ec93dce..8351f1453e 100644 --- a/commands/replicaof.md +++ b/commands/replicaof.md @@ -6,10 +6,6 @@ If a server is already a replica of some master, `REPLICAOF` hostname port will The form `REPLICAOF` NO ONE will stop replication, turning the server into a MASTER, but will not discard the replication. So, if the old master stops working, it is possible to turn the replica into a master and set the application to use this new master in read/write. Later when the other Redis server is fixed, it can be reconfigured to work as a replica. -@return - -@simple-string-reply - @examples ``` diff --git a/commands/reset.md b/commands/reset.md index b3f17d2406..78434755a4 100644 --- a/commands/reset.md +++ b/commands/reset.md @@ -17,7 +17,5 @@ following: appropriate. * Deauthenticates the connection, requiring a call `AUTH` to reauthenticate when authentication is enabled. - -@return - -@simple-string-reply: always 'RESET'. +* Turns off `NO-EVICT` mode. +* Turns off `NO-TOUCH` mode. diff --git a/commands/restore.md b/commands/restore.md index eb605be703..d756a33bb4 100644 --- a/commands/restore.md +++ b/commands/restore.md @@ -18,10 +18,6 @@ exists unless you use the `REPLACE` modifier. `!RESTORE` checks the RDB version and data checksum. If they don't match an error is returned. -@return - -@simple-string-reply: The command returns OK on success. - @examples ``` diff --git a/commands/role.md b/commands/role.md index c308c934f4..08bf21df1f 100644 --- a/commands/role.md +++ b/commands/role.md @@ -69,10 +69,6 @@ The sentinel output is composed of the following parts: 1. The string `sentinel`. 2. 
An array of master names monitored by this Sentinel instance. -@return - -@array-reply: where the first element is one of `master`, `slave`, `sentinel` and the additional elements are role-specific as illustrated above. - @examples ```cli diff --git a/commands/rpop.md b/commands/rpop.md index 99c863cd23..1f66fe2711 100644 --- a/commands/rpop.md +++ b/commands/rpop.md @@ -4,16 +4,6 @@ By default, the command pops a single element from the end of the list. When provided with the optional `count` argument, the reply will consist of up to `count` elements, depending on the list's length. -@return - -When called without the `count` argument: - -@bulk-string-reply: the value of the last element, or `nil` when `key` does not exist. - -When called with the `count` argument: - -@array-reply: list of popped elements, or `nil` when `key` does not exist. - @examples ```cli diff --git a/commands/rpoplpush.md b/commands/rpoplpush.md index d00e8c99c5..55f38bb555 100644 --- a/commands/rpoplpush.md +++ b/commands/rpoplpush.md @@ -13,10 +13,6 @@ If `source` and `destination` are the same, the operation is equivalent to removing the last element from the list and pushing it as first element of the list, so it can be considered as a list rotation command. -@return - -@bulk-string-reply: the element being popped and pushed. - @examples ```cli diff --git a/commands/rpush.md b/commands/rpush.md index def4ee10cf..9b50a44450 100644 --- a/commands/rpush.md +++ b/commands/rpush.md @@ -10,10 +10,6 @@ leftmost element to the rightmost element. So for instance the command `RPUSH mylist a b c` will result into a list containing `a` as first element, `b` as second element and `c` as third element. -@return - -@integer-reply: the length of the list after the push operation. - @examples ```cli diff --git a/commands/rpushx.md b/commands/rpushx.md index daab019a7e..88e0da657e 100644 --- a/commands/rpushx.md +++ b/commands/rpushx.md @@ -3,10 +3,6 @@ already exists and holds a list. 
In contrary to `RPUSH`, no operation will be performed when `key` does not yet exist. -@return - -@integer-reply: the length of the list after the push operation. - @examples ```cli diff --git a/commands/sadd.md b/commands/sadd.md index f8232bb220..93ad38d388 100644 --- a/commands/sadd.md +++ b/commands/sadd.md @@ -5,11 +5,6 @@ members. An error is returned when the value stored at `key` is not a set. -@return - -@integer-reply: the number of elements that were added to the set, not including -all the elements already present in the set. - @examples ```cli diff --git a/commands/save.md b/commands/save.md index c66c5e90a7..e2f32aa4d8 100644 --- a/commands/save.md +++ b/commands/save.md @@ -12,7 +12,3 @@ good last resort to perform the dump of the latest dataset. Please refer to the [persistence documentation][tp] for detailed information. [tp]: /topics/persistence - -@return - -@simple-string-reply: The commands returns OK on success. diff --git a/commands/scan.md b/commands/scan.md index fd5924ff98..7ef81e47e9 100644 --- a/commands/scan.md +++ b/commands/scan.md @@ -9,7 +9,7 @@ Since these commands allow for incremental iteration, returning only a small num However while blocking commands like `SMEMBERS` are able to provide all the elements that are part of a Set in a given moment, The SCAN family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. -Note that `SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` all work very similarly, so this documentation covers all the four commands. However an obvious difference is that in the case of `SSCAN`, `HSCAN` and `ZSCAN` the first argument is the name of the key holding the Set, Hash or Sorted Set value. The `SCAN` command does not need any key name argument as it iterates keys in the current database, so the iterated object is the database itself. 
+Note that `SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` all work very similarly, so this documentation covers all four commands. However an obvious difference is that in the case of `SSCAN`, `HSCAN` and `ZSCAN` the first argument is the name of the key holding the Set, Hash or Sorted Set value. The `SCAN` command does not need any key name argument as it iterates keys in the current database, so the iterated object is the database itself. ## SCAN basic usage @@ -50,6 +50,15 @@ As you can see the **SCAN return value** is an array of two values: the first va Since in the second call the returned cursor is 0, the server signaled to the caller that the iteration finished, and the collection was completely explored. Starting an iteration with a cursor value of 0, and calling `SCAN` until the returned cursor is 0 again is called a **full iteration**. +## Return value + +`SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` return a two element multi-bulk reply, where the first element is a string representing an unsigned 64 bit number (the cursor), and the second element is a multi-bulk with an array of elements. + +* `SCAN` array of elements is a list of keys. +* `SSCAN` array of elements is a list of Set members. +* `HSCAN` array of elements contain two elements, a field and a value, for every returned element of the Hash. +* `ZSCAN` array of elements contain two elements, a member and its associated score, for every returned element of the Sorted Set. + ## Scan guarantees The `SCAN` command, and the other commands in the `SCAN` family, are able to provide to the user a set of guarantees associated to full iterations. @@ -66,25 +75,25 @@ However because `SCAN` has very little state associated (just the cursor) it has `SCAN` family functions do not guarantee that the number of elements returned per call are in a given range. The commands are also allowed to return zero elements, and the client should not consider the iteration complete as long as the returned cursor is not zero. 
-However the number of returned elements is reasonable, that is, in practical terms SCAN may return a maximum number of elements in the order of a few tens of elements when iterating a large collection, or may return all the elements of the collection in a single call when the iterated collection is small enough to be internally represented as an encoded data structure (this happens for small sets, hashes and sorted sets). +However the number of returned elements is reasonable, that is, in practical terms `SCAN` may return a maximum number of elements in the order of a few tens of elements when iterating a large collection, or may return all the elements of the collection in a single call when the iterated collection is small enough to be internally represented as an encoded data structure (this happens for small Sets, Hashes and Sorted Sets). However there is a way for the user to tune the order of magnitude of the number of returned elements per call using the **COUNT** option. ## The COUNT option -While `SCAN` does not provide guarantees about the number of elements returned at every iteration, it is possible to empirically adjust the behavior of `SCAN` using the **COUNT** option. Basically with COUNT the user specified the *amount of work that should be done at every call in order to retrieve elements from the collection*. This is **just a hint** for the implementation, however generally speaking this is what you could expect most of the times from the implementation. +While `SCAN` does not provide guarantees about the number of elements returned at every iteration, it is possible to empirically adjust the behavior of `SCAN` using the **COUNT** option. Basically with COUNT the user specifies the *amount of work that should be done at every call in order to retrieve elements from the collection*. This is **just a hint** for the implementation, however generally speaking this is what you could expect most of the times from the implementation. 
-* The default COUNT value is 10. -* When iterating the key space, or a Set, Hash or Sorted Set that is big enough to be represented by a hash table, assuming no **MATCH** option is used, the server will usually return *count* or a bit more than *count* elements per call. Please check the *why SCAN may return all the elements at once* section later in this document. -* When iterating Sets encoded as intsets (small sets composed of just integers), or Hashes and Sorted Sets encoded as ziplists (small hashes and sets composed of small individual values), usually all the elements are returned in the first `SCAN` call regardless of the COUNT value. +* The default `COUNT` value is 10. +* When iterating the key space, or a Set, Hash or Sorted Set that is big enough to be represented by a hash table, assuming no **MATCH** option is used, the server will usually return *count* or a few more than *count* elements per call. Please check the *why SCAN may return all the elements at once* section later in this document. +* When iterating Sets encoded as intsets (small sets composed of just integers), or Hashes and Sorted Sets encoded as ziplists (small hashes and sets composed of small individual values), usually all the elements are returned in the first `SCAN` call regardless of the `COUNT` value. Important: **there is no need to use the same COUNT value** for every iteration. The caller is free to change the count from one iteration to the other as required, as long as the cursor passed in the next call is the one obtained in the previous call to the command. ## The MATCH option -It is possible to only iterate elements matching a given glob-style pattern, similarly to the behavior of the `KEYS` command that takes a pattern as only argument. +It is possible to only iterate elements matching a given glob-style pattern, similarly to the behavior of the `KEYS` command that takes a pattern as its only argument. 
-To do so, just append the `MATCH ` arguments at the end of the `SCAN` command (it works with all the SCAN family commands). +To do so, just append the `MATCH <pattern>` arguments at the end of the `SCAN` command (it works with all the `SCAN` family commands). This is an example of iteration using **MATCH**: @@ -137,8 +146,14 @@ redis 127.0.0.1:6379> scan 176 MATCH *11* COUNT 1000 redis 127.0.0.1:6379> ``` -As you can see most of the calls returned zero elements, but the last call where a COUNT of 1000 was used in order to force the command to do more scanning for that iteration. +As you can see, most of the calls returned zero elements, except for the last call, where a `COUNT` of 1000 was used in order to force the command to do more scanning for that iteration. +When using [Redis Cluster](/docs/management/scaling/), the search is optimized for patterns that imply a single slot. +If a pattern can only match keys of one slot, +Redis only iterates over keys in that slot, rather than the whole database, +when searching for keys matching the pattern. +For example, with the pattern `{a}h*llo`, Redis would only try to match it with the keys in slot 15495, which hash tag `{a}` implies. +To use a pattern with a hash tag, see [Hash tags](/docs/reference/cluster-spec/#hash-tags) in the Cluster specification for more information. ## The TYPE option @@ -163,6 +178,25 @@ redis 127.0.0.1:6379> SCAN 0 TYPE zset It is important to note that the **TYPE** filter is also applied after elements are retrieved from the database, so the option does not reduce the amount of work the server has to do to complete a full iteration, and for rare types you may receive no elements in many iterations. +## The NOVALUES option + +When using `HSCAN`, you can use the `NOVALUES` option to make Redis return only the keys in the hash table without their corresponding values.
+ +``` +redis 127.0.0.1:6379> HSET myhash a 1 b 2 +(integer) 2 +redis 127.0.0.1:6379> HSCAN myhash 0 +1) "0" +2) 1) "a" + 2) "1" + 3) "b" + 4) "2" +redis 127.0.0.1:6379> HSCAN myhash 0 NOVALUES +1) "0" +2) 1) "a" + 2) "b" +``` + ## Multiple parallel iterations It is possible for an infinite number of clients to iterate the same collection at the same time, as the full state of the iterator is in the cursor, that is obtained and returned to the client at every call. No server side state is taken at all. @@ -173,7 +207,7 @@ Since there is no state server side, but the full state is captured by the curso ## Calling SCAN with a corrupted cursor -Calling `SCAN` with a broken, negative, out of range, or otherwise invalid cursor, will result into undefined behavior but never into a crash. What will be undefined is that the guarantees about the returned elements can no longer be ensured by the `SCAN` implementation. +Calling `SCAN` with a broken, negative, out of range, or otherwise invalid cursor, will result in undefined behavior but never in a crash. What will be undefined is that the guarantees about the returned elements can no longer be ensured by the `SCAN` implementation. The only valid cursors to use are: @@ -194,14 +228,9 @@ However once the data structures are bigger and are promoted to use real hash ta Also note that this behavior is specific of `SSCAN`, `HSCAN` and `ZSCAN`. `SCAN` itself never shows this behavior because the key space is always represented by hash tables. -## Return value - -`SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` return a two elements multi-bulk reply, where the first element is a string representing an unsigned 64 bit number (the cursor), and the second element is a multi-bulk with an array of elements. +## Further reading -* `SCAN` array of elements is a list of keys. -* `SSCAN` array of elements is a list of Set members. -* `HSCAN` array of elements contain two elements, a field and a value, for every returned element of the Hash.
-* `ZSCAN` array of elements contain two elements, a member and its associated score, for every returned element of the sorted set. +For more information about managing keys, please refer to [The Redis Keyspace](/docs/manual/keyspace) tutorial. ## Additional examples diff --git a/commands/scard.md index 85d3c01059..1bbbc0c8bd 100644 --- a/commands/scard.md +++ b/commands/scard.md @@ -1,10 +1,5 @@ Returns the set cardinality (number of elements) of the set stored at `key`. -@return - -@integer-reply: the cardinality (number of elements) of the set, or `0` if `key` -does not exist. - @examples ```cli diff --git a/commands/script-debug.md index 3779ed5e47..5a2e845f69 100644 --- a/commands/script-debug.md +++ b/commands/script-debug.md @@ -20,8 +20,3 @@ is active and retains all changes to the data set once it ends. * `NO`. Disables scripts debug mode. For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). - -@return - -@simple-string-reply: `OK`. - diff --git a/commands/script-exists.md index 758660cf9b..5681628f25 100644 --- a/commands/script-exists.md +++ b/commands/script-exists.md @@ -9,10 +9,3 @@ operation can be performed solely using `EVALSHA` instead of `EVAL` to save bandwidth. For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). - -@return - -@array-reply The command returns an array of integers that correspond to -the specified SHA1 digest arguments. -For every corresponding SHA1 digest of a script that actually exists in the -script cache, a 1 is returned, otherwise 0 is returned.
diff --git a/commands/script-flush.md b/commands/script-flush.md index 705d0142d4..d1e65a109d 100644 --- a/commands/script-flush.md +++ b/commands/script-flush.md @@ -10,10 +10,6 @@ It is possible to use one of the following modifiers to dictate the flushing mod For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). -@return - -@simple-string-reply - ## Behavior change history * `>= 6.2.0`: Default flush behavior now configurable by the **lazyfree-lazy-user-flush** configuration directive. \ No newline at end of file diff --git a/commands/script-help.md b/commands/script-help.md index 02b716326f..ed745e1389 100644 --- a/commands/script-help.md +++ b/commands/script-help.md @@ -1,5 +1 @@ The `SCRIPT HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/script-kill.md b/commands/script-kill.md index 5b4c646edb..b6dae1486e 100644 --- a/commands/script-kill.md +++ b/commands/script-kill.md @@ -13,7 +13,3 @@ the Redis process in a hard way and preventing it from persisting with half-writ information. For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). - -@return - -@simple-string-reply diff --git a/commands/script-load.md b/commands/script-load.md index ed5ab2dbc1..e85c6de628 100644 --- a/commands/script-load.md +++ b/commands/script-load.md @@ -10,8 +10,3 @@ The command works in the same way even if the script was already present in the script cache. For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). - -@return - -@bulk-string-reply This command returns the SHA1 digest of the script added into the -script cache. 
diff --git a/commands/sdiff.md b/commands/sdiff.md index 5d458eca34..7815877126 100644 --- a/commands/sdiff.md +++ b/commands/sdiff.md @@ -12,10 +12,6 @@ SDIFF key1 key2 key3 = {b,d} Keys that do not exist are considered to be empty sets. -@return - -@array-reply: list with members of the resulting set. - @examples ```cli diff --git a/commands/sdiffstore.md b/commands/sdiffstore.md index e941016742..23cd591532 100644 --- a/commands/sdiffstore.md +++ b/commands/sdiffstore.md @@ -3,10 +3,6 @@ is stored in `destination`. If `destination` already exists, it is overwritten. -@return - -@integer-reply: the number of elements in the resulting set. - @examples ```cli diff --git a/commands/select.md b/commands/select.md index 9ebc04e969..5f76dedbab 100644 --- a/commands/select.md +++ b/commands/select.md @@ -8,7 +8,3 @@ In practical terms, Redis databases should be used to separate different keys be When using Redis Cluster, the `SELECT` command cannot be used, since Redis Cluster only supports database zero. In the case of a Redis Cluster, having multiple databases would be useless and an unnecessary source of complexity. Commands operating atomically on a single database would not be possible with the Redis Cluster design and goals. Since the currently selected database is a property of the connection, clients should track the currently selected database and re-select it on reconnection. While there is no command in order to query the selected database in the current connection, the `CLIENT LIST` output shows, for each client, the currently selected database. - -@return - -@simple-string-reply diff --git a/commands/set.md b/commands/set.md index 6f1ceca75a..1dc7322e96 100644 --- a/commands/set.md +++ b/commands/set.md @@ -6,29 +6,17 @@ Any previous time to live associated with the key is discarded on successful `SE The `SET` command supports a set of options that modify its behavior: -* `EX` *seconds* -- Set the specified expire time, in seconds. 
-* `PX` *milliseconds* -- Set the specified expire time, in milliseconds. -* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. -* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. +* `EX` *seconds* -- Set the specified expire time, in seconds (a positive integer). +* `PX` *milliseconds* -- Set the specified expire time, in milliseconds (a positive integer). +* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds (a positive integer). +* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds (a positive integer). * `NX` -- Only set the key if it does not already exist. -* `XX` -- Only set the key if it already exist. +* `XX` -- Only set the key if it already exists. * `KEEPTTL` -- Retain the time to live associated with the key. * `!GET` -- Return the old string stored at key, or nil if key did not exist. An error is returned and `SET` aborted if the value stored at key is not a string. Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, `GETSET`, it is possible that in future versions of Redis these commands will be deprecated and finally removed. -@return - -@simple-string-reply: `OK` if `SET` was executed correctly. - -@nil-reply: `(nil)` if the `SET` operation was not performed because the user specified the `NX` or `XX` option but the condition was not met. - -If the command is issued with the `!GET` option, the above does not apply. It will instead reply as follows, regardless if the `SET` was actually performed: - -@bulk-string-reply: the old string value stored at key. - -@nil-reply: `(nil)` if the key did not exist. 
- @examples ```cli @@ -38,6 +26,10 @@ GET mykey SET anotherkey "will expire in a minute" EX 60 ``` +### Code examples + +{{< clients-example set_and_get />}} + ## Patterns **Note:** The following pattern is discouraged in favor of [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. diff --git a/commands/setbit.md b/commands/setbit.md index e0b440b56c..a2e27ae840 100644 --- a/commands/setbit.md +++ b/commands/setbit.md @@ -20,10 +20,6 @@ allocation) takes ~8ms. Note that once this first allocation is done, subsequent calls to `SETBIT` for the same _key_ will not have the allocation overhead. -@return - -@integer-reply: the original bit value stored at _offset_. - @examples ```cli diff --git a/commands/setex.md b/commands/setex.md index 8d8b6b3f22..c56718a11a 100644 --- a/commands/setex.md +++ b/commands/setex.md @@ -1,23 +1,13 @@ Set `key` to hold the string `value` and set `key` to timeout after a given number of seconds. -This command is equivalent to executing the following commands: +This command is equivalent to: ``` -SET mykey value -EXPIRE mykey seconds +SET key value EX seconds ``` -`SETEX` is atomic, and can be reproduced by using the previous two commands -inside an `MULTI` / `EXEC` block. -It is provided as a faster alternative to the given sequence of operations, -because this operation is very common when Redis is used as a cache. - An error is returned when `seconds` is invalid. -@return - -@simple-string-reply - @examples ```cli @@ -25,3 +15,6 @@ SETEX mykey 10 "Hello" TTL mykey GET mykey ``` +## See also + +`TTL` \ No newline at end of file diff --git a/commands/setnx.md b/commands/setnx.md index 833573c45e..72f5ac6a63 100644 --- a/commands/setnx.md +++ b/commands/setnx.md @@ -3,13 +3,6 @@ In that case, it is equal to `SET`. When `key` already holds a value, no operation is performed. `SETNX` is short for "**SET** if **N**ot e**X**ists". 
-@return - -@integer-reply, specifically: - -* `1` if the key was set -* `0` if the key was not set - @examples ```cli diff --git a/commands/setrange.md b/commands/setrange.md index 617e3d5dc3..3c7aa05b01 100644 --- a/commands/setrange.md +++ b/commands/setrange.md @@ -26,10 +26,6 @@ Thanks to `SETRANGE` and the analogous `GETRANGE` commands, you can use Redis strings as a linear array with O(1) random access. This is a very fast and efficient storage in many real world use cases. -@return - -@integer-reply: the length of the string after it was modified by the command. - @examples Basic usage: diff --git a/commands/shutdown.md b/commands/shutdown.md index 5dca6dec25..8bef39752d 100644 --- a/commands/shutdown.md +++ b/commands/shutdown.md @@ -62,12 +62,6 @@ This provides a best effort minimizing the risk of data loss in a situation wher Before version 7.0, shutting down a heavily loaded master node in a diskless setup was more likely to result in data loss. To minimize the risk of data loss in such setups, it's advised to trigger a manual `FAILOVER` (or `CLUSTER FAILOVER`) to demote the master to a replica and promote one of the replicas to be the new master, before shutting down a master node. -@return - -@simple-string-reply: `OK` if `ABORT` was specified and shutdown was aborted. -On successful shutdown, nothing is returned since the server quits and the connection is closed. -On failure, an error is returned. - ## Behavior change history * `>= 7.0.0`: Introduced waiting for lagging replicas before exiting. \ No newline at end of file diff --git a/commands/sinter.md b/commands/sinter.md index 465b3d73f7..e0b665b8a3 100644 --- a/commands/sinter.md +++ b/commands/sinter.md @@ -14,10 +14,6 @@ Keys that do not exist are considered to be empty sets. With one of the keys being an empty set, the resulting set is also empty (since set intersection with an empty set always results in an empty set). -@return - -@array-reply: list with members of the resulting set. 
- @examples ```cli diff --git a/commands/sintercard.md b/commands/sintercard.md index 24473e50b5..464e7bded5 100644 --- a/commands/sintercard.md +++ b/commands/sintercard.md @@ -8,10 +8,6 @@ By default, the command calculates the cardinality of the intersection of all gi When provided with the optional `LIMIT` argument (which defaults to 0 and means unlimited), if the intersection cardinality reaches limit partway through the computation, the algorithm will exit and yield limit as the cardinality. Such implementation ensures a significant speedup for queries where the limit is lower than the actual intersection cardinality. -@return - -@integer-reply: the number of elements in the resulting intersection. - @examples ```cli diff --git a/commands/sinterstore.md b/commands/sinterstore.md index 17dd0bf0b4..e3e712f036 100644 --- a/commands/sinterstore.md +++ b/commands/sinterstore.md @@ -3,10 +3,6 @@ it is stored in `destination`. If `destination` already exists, it is overwritten. -@return - -@integer-reply: the number of elements in the resulting set. - @examples ```cli diff --git a/commands/sismember.md b/commands/sismember.md index 219cd6e3e0..08b1c6c51f 100644 --- a/commands/sismember.md +++ b/commands/sismember.md @@ -1,12 +1,5 @@ Returns if `member` is a member of the set stored at `key`. -@return - -@integer-reply, specifically: - -* `1` if the element is a member of the set. -* `0` if the element is not a member of the set, or if `key` does not exist. - @examples ```cli diff --git a/commands/slaveof.md b/commands/slaveof.md index 34b95741ed..b42dc06160 100644 --- a/commands/slaveof.md +++ b/commands/slaveof.md @@ -16,7 +16,3 @@ So, if the old master stops working, it is possible to turn the replica into a master and set the application to use this new master in read/write. Later when the other Redis server is fixed, it can be reconfigured to work as a replica. 
- -@return - -@simple-string-reply diff --git a/commands/slowlog-get.md index d496e39439..4b8f0c58d5 100644 --- a/commands/slowlog-get.md +++ b/commands/slowlog-get.md @@ -6,7 +6,7 @@ The execution time does not include I/O operations like talking with the client, A new entry is added to the slow log whenever a command exceeds the execution time threshold defined by the `slowlog-log-slower-than` configuration directive. The maximum number of entries in the slow log is governed by the `slowlog-max-len` configuration directive. -By default the command returns all of the entries in the log. The optional `count` argument limits the number of returned entries, so the command returns at most up to `count` entries. +By default the command returns the latest ten entries in the log. The optional `count` argument limits the number of returned entries, so the command returns at most up to `count` entries; the special value -1 means return all entries. Each entry from the slow log is comprised of the following six values: @@ -20,7 +20,3 @@ Each entry from the slow log is comprised of the following six values: The entry's unique ID can be used in order to avoid processing slow log entries multiple times (for instance you may have a script sending you an email alert for every new slow log entry). The ID is never reset in the course of the Redis server execution, only a server restart will reset it. - -@reply - -@array-reply: a list of slow log entries. diff --git a/commands/slowlog-help.md index a70f3a5d4e..86bf5b39a2 100644 --- a/commands/slowlog-help.md +++ b/commands/slowlog-help.md @@ -1,5 +1 @@ The `SLOWLOG HELP` command returns a helpful text describing the different subcommands.
- -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/slowlog-len.md b/commands/slowlog-len.md index 6f0d97758a..9515bb9473 100644 --- a/commands/slowlog-len.md +++ b/commands/slowlog-len.md @@ -4,9 +4,3 @@ A new entry is added to the slow log whenever a command exceeds the execution ti The maximum number of entries in the slow log is governed by the `slowlog-max-len` configuration directive. Once the slog log reaches its maximal size, the oldest entry is removed whenever a new entry is created. The slow log can be cleared with the `SLOWLOG RESET` command. - -@reply - -@integer-reply - -The number of entries in the slow log. diff --git a/commands/slowlog-reset.md b/commands/slowlog-reset.md index b522c26a5c..a860f4433f 100644 --- a/commands/slowlog-reset.md +++ b/commands/slowlog-reset.md @@ -1,7 +1,3 @@ This command resets the slow log, clearing all entries in it. Once deleted the information is lost forever. - -@reply - -@simple-string-reply: `OK` diff --git a/commands/smembers.md b/commands/smembers.md index 2272859352..20a8e23872 100644 --- a/commands/smembers.md +++ b/commands/smembers.md @@ -2,10 +2,6 @@ Returns all the members of the set value stored at `key`. This has the same effect as running `SINTER` with one argument `key`. -@return - -@array-reply: all elements of the set. - @examples ```cli diff --git a/commands/smismember.md b/commands/smismember.md index c4cec64a6b..4b8885996c 100644 --- a/commands/smismember.md +++ b/commands/smismember.md @@ -2,11 +2,6 @@ Returns whether each `member` is a member of the set stored at `key`. For every `member`, `1` is returned if the value is a member of the set, or `0` if the element is not a member of the set or if `key` does not exist. -@return - -@array-reply: list representing the membership of the given elements, in the same -order as they are requested. 
- @examples ```cli diff --git a/commands/smove.md index 6b2400b6b0..241125cd2b 100644 --- a/commands/smove.md +++ b/commands/smove.md @@ -12,13 +12,6 @@ removed from the source set. An error is returned if `source` or `destination` does not hold a set value. -@return - -@integer-reply, specifically: - -* `1` if the element is moved. -* `0` if the element is not a member of `source` and no operation was performed. - @examples ```cli diff --git a/commands/sort.md index 2a091db48c..a5843d86e7 100644 --- a/commands/sort.md +++ b/commands/sort.md @@ -105,8 +105,11 @@ SORT mylist BY weight_* GET object_* GET # ## Restrictions for using external keys -When enabling `Redis cluster-mode` there is no way to guarantee the existence of the external keys on the node which the command is processed on. -In this case, any use of `GET` or `BY` which reference external key pattern will cause the command to fail with an error. +Before 8.0, when enabling `Redis cluster-mode` there was no way to guarantee the existence of the external keys on the node which the command is processed on. In this case, any use of `GET` or `BY` which references an external key pattern will cause the command to fail with an error. + +Starting from 8.0, a pattern with a hash tag can be mapped to a slot, and so in `Redis cluster-mode`, the use of `BY` or `GET` is allowed when the pattern contains a hash tag that implies a specific slot which the key is also in; this means any key matching the pattern must be in the same slot as the key, and therefore on the same node. For example, in cluster mode, `{mylist}weight_*` is acceptable as a pattern when sorting `mylist`, while the pattern `{abc}weight_*` will be denied, causing the command to fail with an error. + +To use a pattern with a hash tag, see [Hash tags](/docs/reference/cluster-spec/#hash-tags) for more information.
Starting from Redis 7.0, any use of `GET` or `BY` which reference external key pattern will only be allowed in case the current user running the command has full key read permissions. Full key read permissions can be set for the user by, for example, specifying `'%R~*'` or `'~*` with the relevant command access rules. @@ -147,8 +150,3 @@ SORT mylist BY weight_*->fieldname GET object_*->fieldname The string `->` is used to separate the key name from the hash field name. The key is substituted as documented above, and the hash stored at the resulting key is accessed to retrieve the specified hash field. - -@return - -@array-reply: without passing the `store` option the command returns a list of sorted elements. -@integer-reply: when the `store` option is specified the command returns the number of sorted elements in the destination list. diff --git a/commands/sort_ro.md b/commands/sort_ro.md index 66223a32e5..82dc85ee8d 100644 --- a/commands/sort_ro.md +++ b/commands/sort_ro.md @@ -11,7 +11,3 @@ See original `SORT` for more details. ``` SORT_RO mylist BY weight_*->fieldname GET object_*->fieldname ``` - -@return - -@array-reply: a list of sorted elements. diff --git a/commands/spop.md b/commands/spop.md index 8c86a9ab36..5ebfaed881 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -6,16 +6,6 @@ By default, the command pops a single member from the set. When provided with the optional `count` argument, the reply will consist of up to `count` members, depending on the set's cardinality. -@return - -When called without the `count` argument: - -@bulk-string-reply: the removed member, or `nil` when `key` does not exist. - -When called with the `count` argument: - -@array-reply: the removed members, or an empty array when `key` does not exist. 
- @examples ```cli diff --git a/commands/spublish.md b/commands/spublish.md index e8b6925c1c..0b8e65d09a 100644 --- a/commands/spublish.md +++ b/commands/spublish.md @@ -6,10 +6,6 @@ The cluster makes sure that published shard messages are forwarded to all the no For more information about sharded pubsub, see [Sharded Pubsub](/topics/pubsub#sharded-pubsub). -@return - -@integer-reply: the number of clients that received the message. - @examples For example the following command publish to channel `orders` with a subscriber already waiting for message(s). diff --git a/commands/srandmember.md b/commands/srandmember.md index dd2d4a837d..b90dd23b7b 100644 --- a/commands/srandmember.md +++ b/commands/srandmember.md @@ -6,12 +6,6 @@ The array's length is either `count` or the set's cardinality (`SCARD`), whichev If called with a negative `count`, the behavior changes and the command is allowed to return the **same element multiple times**. In this case, the number of returned elements is the absolute value of the specified `count`. -@return - -@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. - -@array-reply: when the additional `count` argument is passed, the command returns an array of elements, or an empty array when `key` does not exist. - @examples ```cli diff --git a/commands/srem.md b/commands/srem.md index fca5b75d3c..2a85a99e6b 100644 --- a/commands/srem.md +++ b/commands/srem.md @@ -5,11 +5,6 @@ If `key` does not exist, it is treated as an empty set and this command returns An error is returned when the value stored at `key` is not a set. -@return - -@integer-reply: the number of members that were removed from the set, not -including non existing members. 
- @examples ```cli diff --git a/commands/ssubscribe.md b/commands/ssubscribe.md index c628b8e2d4..bf7d30e859 100644 --- a/commands/ssubscribe.md +++ b/commands/ssubscribe.md @@ -15,7 +15,7 @@ Reading messages... (press Ctrl-C to quit) 1) "ssubscribe" 2) "orders" 3) (integer) 1 -1) "message" +1) "smessage" 2) "orders" 3) "hello" ``` diff --git a/commands/strlen.md b/commands/strlen.md index e504180f01..f4b1fee52d 100644 --- a/commands/strlen.md +++ b/commands/strlen.md @@ -1,11 +1,6 @@ Returns the length of the string value stored at `key`. An error is returned when `key` holds a non-string value. -@return - -@integer-reply: the length of the string at `key`, or `0` when `key` does not -exist. - @examples ```cli diff --git a/commands/subscribe.md b/commands/subscribe.md index bbc71272f9..1bde8ebe9c 100644 --- a/commands/subscribe.md +++ b/commands/subscribe.md @@ -3,7 +3,10 @@ Subscribes the client to the specified channels. Once the client enters the subscribed state it is not supposed to issue any other commands, except for additional `SUBSCRIBE`, `SSUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, `SUNSUBSCRIBE`, `PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands. +However, if RESP3 is used (see `HELLO`) it is possible for a client to issue any commands while in subscribed state. + +For more information, see [Pub/sub](/docs/interact/pubsub/). ## Behavior change history -* `>= 6.2.0`: `RESET` can be called to exit subscribed state. \ No newline at end of file +* `>= 6.2.0`: `RESET` can be called to exit subscribed state. diff --git a/commands/substr.md b/commands/substr.md index 7283defc49..c188f95494 100644 --- a/commands/substr.md +++ b/commands/substr.md @@ -7,10 +7,6 @@ So -1 means the last character, -2 the penultimate and so forth. The function handles out of range requests by limiting the resulting range to the actual length of the string. 
-@return - -@bulk-string-reply - @examples ```cli diff --git a/commands/sunion.md b/commands/sunion.md index 205646893a..31315484b6 100644 --- a/commands/sunion.md +++ b/commands/sunion.md @@ -11,10 +11,6 @@ SUNION key1 key2 key3 = {a,b,c,d,e} Keys that do not exist are considered to be empty sets. -@return - -@array-reply: list with members of the resulting set. - @examples ```cli diff --git a/commands/sunionstore.md b/commands/sunionstore.md index 716caf13f0..28b752640a 100644 --- a/commands/sunionstore.md +++ b/commands/sunionstore.md @@ -3,10 +3,6 @@ it is stored in `destination`. If `destination` already exists, it is overwritten. -@return - -@integer-reply: the number of elements in the resulting set. - @examples ```cli diff --git a/commands/swapdb.md b/commands/swapdb.md index ead2db07c0..7037f477f0 100644 --- a/commands/swapdb.md +++ b/commands/swapdb.md @@ -6,10 +6,6 @@ the other way around. Example: This will swap database 0 with database 1. All the clients connected with database 0 will immediately see the new data, exactly like all the clients connected with database 1 will see the data that was formerly of database 0. -@return - -@simple-string-reply: `OK` if `SWAPDB` was executed correctly. - @examples ``` diff --git a/commands/sync.md b/commands/sync.md index cb958479ca..5d9ad047f9 100644 --- a/commands/sync.md +++ b/commands/sync.md @@ -8,7 +8,3 @@ For more information about replication in Redis please check the [replication page][tr]. [tr]: /topics/replication - -@return - -**Non standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master. diff --git a/commands/time.md b/commands/time.md index 2cf1af6828..b7adb8f164 100644 --- a/commands/time.md +++ b/commands/time.md @@ -3,15 +3,6 @@ timestamp and the amount of microseconds already elapsed in the current second. Basically the interface is very similar to the one of the `gettimeofday` system call. 
-@return - -@array-reply, specifically: - -A multi bulk reply containing two elements: - -* unix time in seconds. -* microseconds. - @examples ```cli diff --git a/commands/touch.md b/commands/touch.md index a369354503..ba5bc9b0db 100644 --- a/commands/touch.md +++ b/commands/touch.md @@ -1,10 +1,6 @@ Alters the last access time of a key(s). A key is ignored if it does not exist. -@return - -@integer-reply: The number of keys that were touched. - @examples ```cli diff --git a/commands/ttl.md b/commands/ttl.md index 15821e1140..1cc0a86f90 100644 --- a/commands/ttl.md +++ b/commands/ttl.md @@ -11,10 +11,6 @@ Starting with Redis 2.8 the return value in case of error changed: See also the `PTTL` command that returns the same information with milliseconds resolution (Only available in Redis 2.6 or greater). -@return - -@integer-reply: TTL in seconds, or a negative value in order to signal an error (see the description above). - @examples ```cli diff --git a/commands/type.md b/commands/type.md index 8a818e0544..7bdd48407d 100644 --- a/commands/type.md +++ b/commands/type.md @@ -2,10 +2,6 @@ Returns the string representation of the type of the value stored at `key`. The different types that can be returned are: `string`, `list`, `set`, `zset`, `hash` and `stream`. -@return - -@simple-string-reply: type of `key`, or `none` when `key` does not exist. - @examples ```cli diff --git a/commands/unlink.md b/commands/unlink.md index c91dd664de..0d5ade0217 100644 --- a/commands/unlink.md +++ b/commands/unlink.md @@ -5,10 +5,6 @@ blocking, while `DEL` is. This is where the command name comes from: the command just **unlinks** the keys from the keyspace. The actual removal will happen later asynchronously. -@return - -@integer-reply: The number of keys that were unlinked. 
- @examples ```cli diff --git a/commands/unwatch.md b/commands/unwatch.md index b60bcb8040..dcdda08dab 100644 --- a/commands/unwatch.md +++ b/commands/unwatch.md @@ -3,7 +3,3 @@ Flushes all the previously watched keys for a [transaction][tt]. [tt]: /topics/transactions If you call `EXEC` or `DISCARD`, there's no need to manually call `UNWATCH`. - -@return - -@simple-string-reply: always `OK`. diff --git a/commands/wait.md b/commands/wait.md index d3636ae0b4..b671faff4e 100644 --- a/commands/wait.md +++ b/commands/wait.md @@ -4,13 +4,13 @@ of replicas. If the timeout, specified in milliseconds, is reached, the command returns even if the specified number of replicas were not yet reached. The command **will always return** the number of replicas that acknowledged -the write commands sent before the `WAIT` command, both in the case where +the write commands sent by the current client before the `WAIT` command, both in the case where the specified number of replicas are reached, or when the timeout is reached. A few remarks: 1. When `WAIT` returns, all the previous write commands sent in the context of the current connection are guaranteed to be received by the number of replicas returned by `WAIT`. -2. If the command is sent as part of a `MULTI` transaction, the command does not block but instead just return ASAP the number of replicas that acknowledged the previous write commands. +2. If the command is sent as part of a `MULTI` transaction (since Redis 7.0, any context that does not allow blocking, such as inside scripts), the command does not block but instead just return ASAP the number of replicas that acknowledged the previous write commands. 3. A timeout of 0 means to block forever. 4. Since `WAIT` returns the number of replicas reached both in case of failure and success, the client should check that the returned value is equal or greater to the replication level it demanded. @@ -37,10 +37,6 @@ write command was executed in the context of a given client. 
When `WAIT` is called Redis checks if the specified number of replicas already acknowledged this offset or a greater one. -@return - -@integer-reply: The command returns the number of replicas reached by all the writes performed in the context of the current connection. - @examples ``` diff --git a/commands/waitaof.md b/commands/waitaof.md new file mode 100644 index 0000000000..a6269e2375 --- /dev/null +++ b/commands/waitaof.md @@ -0,0 +1,59 @@ +This command blocks the current client until all previous write commands by that client are acknowledged as having been fsynced to the AOF of the local Redis and/or at least the specified number of replicas. + +`numlocal` represents the number of local fsyncs required to be confirmed before proceeding. +When `numlocal` is set to 1, the command blocks until the data written to the Redis instance is confirmed to be persisted to the local AOF file. +The value 0 disables this check. + +If the timeout, specified in milliseconds, is reached, the command returns even if the specified number of acknowledgments has not been met. + +The command **will always return** the number of masters and replicas that have fsynced all write commands sent by the current client before the `WAITAOF` command, both in the case where the specified thresholds were met, and when the timeout is reached. + +A few remarks: + +1. When `WAITAOF` returns, all the previous write commands sent in the context of the current connection are guaranteed to be fsynced to the AOF of at least the number of masters and replicas returned by `WAITAOF`. +2. If the command is sent as part of a `MULTI` transaction (or any other context that does not allow blocking, such as inside scripts), the command does not block but instead returns immediately the number of masters and replicas that fsynced all previous write commands. +3. A timeout of 0 means to block forever. +4. 
Since `WAITAOF` returns the number of fsyncs completed both in case of success and timeout, the client should check that the returned values are equal to or greater than the persistence level required. +5. `WAITAOF` cannot be used on replica instances, and the `numlocal` argument cannot be non-zero if the local Redis does not have AOF enabled. + +Limitations +--- +It is possible to write a module or Lua script that propagates writes to the AOF but not the replication stream. +(For modules, this is done using the `fmt` argument to `RedisModule_Call` or `RedisModule_Replicate`; for Lua scripts, this is achieved using `redis.set_repl`.) + +These features are incompatible with the `WAITAOF` command as it is currently implemented, and using them in combination may result in incorrect behavior. + +Consistency and WAITAOF +--- + +Note that, similarly to `WAIT`, `WAITAOF` does not make Redis a strongly-consistent store. +Unless waiting for all members of a cluster to fsync writes to disk, data can still be lost during a failover or a Redis restart. +However, `WAITAOF` does improve real-world data safety. + +Implementation details +--- + +Since Redis 7.2, Redis tracks and increments the replication offset even when no replicas are configured (as long as AOF exists). + +In addition, Redis replicas asynchronously ping their master with two replication offsets: the offset they have processed in the replication stream, and the offset they have fsynced to their AOF. + +Redis remembers, for each client, the replication offset of the produced replication stream when the last write command was executed in the context of that client. +When `WAITAOF` is called, Redis checks if the local Redis and/or the specified number of replicas have confirmed fsyncing this offset or a greater one to their AOF.
+ +@examples + +``` +> SET foo bar +OK +> WAITAOF 1 0 0 +1) (integer) 1 +2) (integer) 0 +> WAITAOF 0 1 1000 +1) (integer) 1 +2) (integer) 0 +``` + +In the above example, the first call to `WAITAOF` does not use a timeout and asks for the write to be fsynced to the local Redis only; it returns with [1, 0] when this is completed. + +In the second attempt we instead specify a timeout, and ask for the write to be confirmed as fsynced by a single replica. +Since there are no connected replicas, the `WAITAOF` command unblocks after one second and again returns [1, 0], indicating the write has been fsynced on the local Redis but no replicas. diff --git a/commands/watch.md b/commands/watch.md index 08f823ff7f..2121bd5cb5 100644 --- a/commands/watch.md +++ b/commands/watch.md @@ -2,7 +2,3 @@ Marks the given keys to be watched for conditional execution of a [transaction][tt]. [tt]: /topics/transactions - -@return - -@simple-string-reply: always `OK`. diff --git a/commands/xack.md b/commands/xack.md index aae2db5586..daeb7c21c8 100644 --- a/commands/xack.md +++ b/commands/xack.md @@ -14,15 +14,6 @@ so that such message does not get processed again, and as a side effect, the PEL entry about this message is also purged, releasing memory from the Redis server. -@return - -@integer-reply, specifically: - -The command returns the number of messages successfully acknowledged. -Certain message IDs may no longer be part of the PEL (for example because -they have already been acknowledged), and XACK will not count them as -successfully acknowledged. - @examples ``` diff --git a/commands/xadd.md b/commands/xadd.md index d651a6858c..3f431a133b 100644 --- a/commands/xadd.md +++ b/commands/xadd.md @@ -72,17 +72,6 @@ Will add a new entry but will also evict old entries so that the stream will con For further information about Redis streams please check our [introduction to Redis Streams document](/topics/streams-intro). 
-@return - -@bulk-string-reply, specifically: - -The command returns the ID of the added entry. The ID is the one auto-generated -if `*` is passed as ID argument, otherwise the command just returns the same ID -specified by the user during insertion. - -The command returns a @nil-reply when used with the `NOMKSTREAM` option and the -key doesn't exist. - @examples ```cli diff --git a/commands/xautoclaim.md b/commands/xautoclaim.md index 5ff44f2fd2..4b65eba3ae 100644 --- a/commands/xautoclaim.md +++ b/commands/xautoclaim.md @@ -25,16 +25,6 @@ These message IDs are returned to the caller as a part of `XAUTOCLAIM`s reply. Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted deliveries count for that message, unless the `JUSTID` option has been specified (which only delivers the message ID, not the message itself). Messages that cannot be processed for some reason - for example, because consumers systematically crash when processing them - will exhibit high attempted delivery counts that can be detected by monitoring. -@return - -@array-reply, specifically: - -An array with three elements: - -1. A stream ID to be used as the `` argument for the next call to `XAUTOCLAIM`. -2. An array containing all the successfully claimed messages in the same format as `XRANGE`. -3. An array containing message IDs that no longer exist in the stream, and were deleted from the PEL in which they were found. - @examples ``` diff --git a/commands/xclaim.md b/commands/xclaim.md index d3a17dc32a..50fd8d23c2 100644 --- a/commands/xclaim.md +++ b/commands/xclaim.md @@ -10,7 +10,7 @@ command argument. Normally this is what happens: This dynamic is clearly explained in the [Stream intro documentation](/topics/streams-intro). -Note that the message is claimed only if its idle time is greater the minimum idle time we specify when calling `XCLAIM`. 
Because as a side effect `XCLAIM` will also reset the idle time (since this is a new attempt at processing the message), two consumers trying to claim a message at the same time will never both succeed: only one will successfully claim the message. This avoids that we process a given message multiple times in a trivial way (yet multiple processing is possible and unavoidable in the general case). +Note that the message is claimed only if its idle time is greater than the minimum idle time we specify when calling `XCLAIM`. Because as a side effect `XCLAIM` will also reset the idle time (since this is a new attempt at processing the message), two consumers trying to claim a message at the same time will never both succeed: only one will successfully claim the message. This avoids that we process a given message multiple times in a trivial way (yet multiple processing is possible and unavoidable in the general case). Moreover, as a side effect, `XCLAIM` will increment the count of attempted deliveries of the message unless the `JUSTID` option has been specified (which only delivers the message ID, not the message itself). In this way messages that cannot be processed for some reason, for instance because the consumers crash attempting to process them, will start to have a larger counter and can be detected inside the system. @@ -35,14 +35,6 @@ useful to normal users: 4. `FORCE`: Creates the pending message entry in the PEL even if certain specified IDs are not already in the PEL assigned to a different client. However the message must be exist in the stream, otherwise the IDs of non existing messages are ignored. 5. `JUSTID`: Return just an array of IDs of messages successfully claimed, without returning the actual message. Using this option means the retry counter is not incremented. -@return - -@array-reply, specifically: - -The command returns all the messages successfully claimed, in the same format -as `XRANGE`. 
However if the `JUSTID` option was specified, only the message -IDs are reported, without including the actual message. - @examples ``` diff --git a/commands/xdel.md b/commands/xdel.md index 3ee4a3d067..57b9a8ba47 100644 --- a/commands/xdel.md +++ b/commands/xdel.md @@ -26,10 +26,6 @@ collection in case a given macro-node reaches a given amount of deleted entries. Currently with the usage we anticipate for this data structure, it is not a good idea to add such complexity. -@return - -@integer-reply: the number of entries actually deleted. - @examples ``` diff --git a/commands/xgroup-create.md b/commands/xgroup-create.md index f0f1606bf0..124a8a14f4 100644 --- a/commands/xgroup-create.md +++ b/commands/xgroup-create.md @@ -1,23 +1,21 @@ -This command creates a new consumer group uniquely identified by `` for the stream stored at ``. +Create a new consumer group uniquely identified by `` for the stream stored at `` -Every group has a unique name in a given stream. When a consumer group with the same name already exists, the command returns a `-BUSYGROUP` error. +Every group has a unique name in a given stream. +When a consumer group with the same name already exists, the command returns a `-BUSYGROUP` error. The command's `` argument specifies the last delivered entry in the stream from the new group's perspective. -The special ID `$` means the ID of the last entry in the stream, but you can provide any valid ID instead. +The special ID `$` is the ID of the last entry in the stream, but you can substitute it with any valid ID. + For example, if you want the group's consumers to fetch the entire stream from the beginning, use zero as the starting ID for the consumer group: XGROUP CREATE mystream mygroup 0 -By default, the `XGROUP CREATE` command insists that the target stream exists and returns an error when it doesn't. 
-However, you can use the optional `MKSTREAM` subcommand as the last argument after the `` to automatically create the stream (with length of 0) if it doesn't exist: +By default, the `XGROUP CREATE` command expects that the target stream exists, and returns an error when it doesn't. +If a stream does not exist, you can create it automatically with length of 0 by using the optional `MKSTREAM` subcommand as the last argument after the ``: XGROUP CREATE mystream mygroup $ MKSTREAM -The optional `entries_read` named argument can be specified to enable consumer group lag tracking for an arbitrary ID. -An arbitrary ID is any ID that isn't the ID of the stream's first entry, its last entry or the zero ("0-0") ID. -This can be useful you know exactly how many entries are between the arbitrary ID (excluding it) and the stream's last entry. -In such cases, the `entries_read` can be set to the stream's `entries_added` subtracted with the number of entries. - -@return - -@simple-string-reply: `OK` on success. +To enable consumer group lag tracking, specify the optional `entries_read` named argument with an arbitrary ID. +An arbitrary ID is any ID that isn't the ID of the stream's first entry, last entry, or zero ("0-0") ID. +Use it to find out how many entries are between the arbitrary ID (excluding it) and the stream's last entry. +Set the `entries_read` the stream's `entries_added` subtracted by the number of entries. diff --git a/commands/xgroup-createconsumer.md b/commands/xgroup-createconsumer.md index 17274a5eab..f81d468a02 100644 --- a/commands/xgroup-createconsumer.md +++ b/commands/xgroup-createconsumer.md @@ -1,7 +1,4 @@ Create a consumer named `` in the consumer group `` of the stream that's stored at ``. Consumers are also created automatically whenever an operation, such as `XREADGROUP`, references a consumer that doesn't exist. 
- -@return - -@integer-reply: the number of created consumers (0 or 1) \ No newline at end of file +This is valid for `XREADGROUP` only when there is data in the stream. diff --git a/commands/xgroup-delconsumer.md b/commands/xgroup-delconsumer.md index 9e73da8922..57c71adb12 100644 --- a/commands/xgroup-delconsumer.md +++ b/commands/xgroup-delconsumer.md @@ -4,7 +4,3 @@ Sometimes it may be useful to remove old consumers since they are no longer used Note, however, that any pending messages that the consumer had will become unclaimable after it was deleted. It is strongly recommended, therefore, that any pending messages are claimed or acknowledged prior to deleting the consumer from the group. - -@return - -@integer-reply: the number of pending messages that the consumer had before it was deleted diff --git a/commands/xgroup-destroy.md b/commands/xgroup-destroy.md index 448468ba14..29af5f73cb 100644 --- a/commands/xgroup-destroy.md +++ b/commands/xgroup-destroy.md @@ -1,7 +1,3 @@ The `XGROUP DESTROY` command completely destroys a consumer group. The consumer group will be destroyed even if there are active consumers, and pending messages, so make sure to call this command only when really needed. - -@return - -@integer-reply: the number of destroyed consumer groups (0 or 1) \ No newline at end of file diff --git a/commands/xgroup-help.md b/commands/xgroup-help.md index 1eb1a7bb34..405008ac80 100644 --- a/commands/xgroup-help.md +++ b/commands/xgroup-help.md @@ -1,5 +1 @@ The `XGROUP HELP` command returns a helpful text describing the different subcommands. 
- -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/xgroup-setid.md b/commands/xgroup-setid.md index 0808404f25..5ea91accdc 100644 --- a/commands/xgroup-setid.md +++ b/commands/xgroup-setid.md @@ -10,7 +10,3 @@ The optional `entries_read` argument can be specified to enable consumer group l An arbitrary ID is any ID that isn't the ID of the stream's first entry, its last entry or the zero ("0-0") ID. This can be useful you know exactly how many entries are between the arbitrary ID (excluding it) and the stream's last entry. In such cases, the `entries_read` can be set to the stream's `entries_added` subtracted with the number of entries. - -@return - -@simple-string-reply: `OK` on success. diff --git a/commands/xinfo-consumers.md b/commands/xinfo-consumers.md index f65366d3c9..855a308ced 100644 --- a/commands/xinfo-consumers.md +++ b/commands/xinfo-consumers.md @@ -3,12 +3,12 @@ This command returns the list of consumers that belong to the `` cons The following information is provided for each consumer in the group: * **name**: the consumer's name -* **pending**: the number of pending messages for the client, which are messages that were delivered but are yet to be acknowledged -* **idle**: the number of milliseconds that have passed since the consumer last interacted with the server +* **pending**: the number of entries in the PEL: pending messages for the consumer, which are messages that were delivered but are yet to be acknowledged +* **idle**: the number of milliseconds that have passed since the consumer's last attempted interaction (Examples: `XREADGROUP`, `XCLAIM`, `XAUTOCLAIM`) +* **inactive**: the number of milliseconds that have passed since the consumer's last successful interaction (Examples: `XREADGROUP` that actually read some entries into the PEL, `XCLAIM`/`XAUTOCLAIM` that actually claimed some entries) -@reply - -@array-reply: a list of consumers. 
+Note that before Redis 7.2.0, **idle** used to denote the time passed since last successful interaction. +In 7.2.0, **inactive** was added and **idle** was changed to denote the time passed since last attempted interaction. @examples @@ -20,10 +20,14 @@ The following information is provided for each consumer in the group: 4) (integer) 1 5) idle 6) (integer) 9104628 + 7) inactive + 8) (integer) 18104698 2) 1) name 2) "Bob" 3) pending 4) (integer) 1 5) idle 6) (integer) 83841983 + 7) inactive + 8) (integer) 993841998 ``` diff --git a/commands/xinfo-groups.md b/commands/xinfo-groups.md index 03eafecfa2..3074725158 100644 --- a/commands/xinfo-groups.md +++ b/commands/xinfo-groups.md @@ -1,12 +1,12 @@ -This command returns the list of all consumers groups of the stream stored at ``. +This command returns the list of all consumer groups of the stream stored at ``. By default, only the following information is provided for each of the groups: * **name**: the consumer group's name * **consumers**: the number of consumers in the group * **pending**: the length of the group's pending entries list (PEL), which are messages that were delivered but are yet to be acknowledged -* **last-delivered-id**: the ID of the last entry delivered the group's consumers -* **entries-read**: the logical "read counter" of the last entry delivered to group's consumers +* **last-delivered-id**: the ID of the last entry delivered to the group's consumers +* **entries-read**: the logical "read counter" of the last entry delivered to the group's consumers * **lag**: the number of entries in the stream that are still waiting to be delivered to the group's consumers, or a NULL when that number can't be determined. ### Consumer group lag @@ -22,7 +22,7 @@ The lag is the difference between these two. The stream's counter (the `entries_added` field of the `XINFO STREAM` command) is incremented by one with every `XADD` and counts all of the entries added to the stream during its lifetime. 
-The consumer group's counter, `entries_read`, is the logical counter of entries that the group had read. +The consumer group's counter, `entries_read`, is the logical counter of entries the group had read. It is important to note that this counter is only a heuristic rather than an accurate counter, and therefore the use of the term "logical". The counter attempts to reflect the number of entries that the group **should have read** to get to its current `last-delivered-id`. The `entries_read` counter is accurate only in a perfect world, where a consumer group starts at the stream's first entry and processes all of its entries (i.e., no entries deleted before processing). @@ -39,10 +39,6 @@ However, the lag is only temporarily unavailable. It is restored automatically during regular operation as consumers keep processing messages. Once the consumer group delivers the last message in the stream to its members, it will be set with the correct logical read counter, and tracking its lag can be resumed. -@reply - -@array-reply: a list of consumer groups. - @examples ``` diff --git a/commands/xinfo-help.md b/commands/xinfo-help.md index 293892fd8f..34ad659b9c 100644 --- a/commands/xinfo-help.md +++ b/commands/xinfo-help.md @@ -1,5 +1 @@ The `XINFO HELP` command returns a helpful text describing the different subcommands. - -@return - -@array-reply: a list of subcommands and their descriptions diff --git a/commands/xinfo-stream.md b/commands/xinfo-stream.md index f69760840b..4fcbe1e2ee 100644 --- a/commands/xinfo-stream.md +++ b/commands/xinfo-stream.md @@ -12,16 +12,42 @@ The informative details provided by this command are: * **first-entry**: the ID and field-value tuples of the first entry in the stream * **last-entry**: the ID and field-value tuples of the last entry in the stream +### The `FULL` modifier + The optional `FULL` modifier provides a more verbose reply. 
When provided, the `FULL` reply includes an **entries** array that consists of the stream entries (ID and field-value tuples) in ascending order. Furthermore, **groups** is also an array, and for each of the consumer groups it consists of the information reported by `XINFO GROUPS` and `XINFO CONSUMERS`. -The `COUNT` option can be used to limit the number of stream and PEL entries that are returned (The first `` entries are returned). -The default `COUNT` is 10 and a `COUNT` of 0 means that all entries will be returned (execution time may be long if the stream has a lot of entries). +The following information is provided for each of the groups: + +* **name**: the consumer group's name +* **last-delivered-id**: the ID of the last entry delivered to the group's consumers +* **entries-read**: the logical "read counter" of the last entry delivered to the group's consumers +* **lag**: the number of entries in the stream that are still waiting to be delivered to the group's consumers, or a NULL when that number can't be determined. +* **pel-count**: the length of the group's pending entries list (PEL), which are messages that were delivered but are yet to be acknowledged +* **pending**: an array with pending entries information (see below) +* **consumers**: an array with consumers information (see below) + +The following information is provided for each pending entry: -@return +1. The ID of the message. +2. The name of the consumer that fetched the message and has still to acknowledge it. We call it the current *owner* of the message. +3. The UNIX timestamp of when the message was delivered to this consumer. +4. The number of times this message was delivered. 
-@array-reply: a list of informational bits +The following information is provided for each consumer: + +* **name**: the consumer's name +* **seen-time**: the UNIX timestamp of the last attempted interaction (Examples: `XREADGROUP`, `XCLAIM`, `XAUTOCLAIM`) +* **active-time**: the UNIX timestamp of the last successful interaction (Examples: `XREADGROUP` that actually read some entries into the PEL, `XCLAIM`/`XAUTOCLAIM` that actually claimed some entries) +* **pel-count**: the number of entries in the PEL: pending messages for the consumer, which are messages that were delivered but are yet to be acknowledged +* **pending**: an array with pending entries information, has the same structure as described above, except the consumer name is omitted (redundant, since anyway we are in a specific consumer context) + +Note that before Redis 7.2.0, **seen-time** used to denote the last successful interaction. +In 7.2.0, **active-time** was added and **seen-time** was changed to denote the last attempted interaction. + +The `COUNT` option can be used to limit the number of stream and PEL entries that are returned (The first `` entries are returned). +The default `COUNT` is 10 and a `COUNT` of 0 means that all entries will be returned (execution time may be long if the stream has a lot of entries). 
@examples @@ -107,12 +133,13 @@ OK 14) 1) 1) "name" 2) "Alice" 3) "seen-time" - 4) (integer) 1638125153423 - 5) "pel-count" - 6) (integer) 1 - 7) "pending" - 8) 1) 1) "1638125133432-0" - 2) (integer) 1638125153423 - 3) (integer) 1 -> + 4) (integer) 1638125133422 + 5) "active-time" + 6) (integer) 1638125133432 + 7) "pel-count" + 8) (integer) 1 + 9) "pending" + 10) 1) 1) "1638125133432-0" + 2) (integer) 1638125133432 + 3) (integer) 1 ``` diff --git a/commands/xlen.md b/commands/xlen.md index 41c2010e7f..e996f7ee18 100644 --- a/commands/xlen.md +++ b/commands/xlen.md @@ -8,10 +8,6 @@ Streams are not auto-deleted once they have no entries inside (for instance after an `XDEL` call), because the stream may have consumer groups associated with it. -@return - -@integer-reply: the number of entries of the stream at `key`. - @examples ```cli diff --git a/commands/xpending.md b/commands/xpending.md index 48840aaa38..c22ae8e38d 100644 --- a/commands/xpending.md +++ b/commands/xpending.md @@ -127,11 +127,3 @@ The `XPENDING` command allows iterating over the pending entries just like prefixing the ID of the last-read pending entry with the `(` character that denotes an open (exclusive) range, and proving it to the subsequent call to the command. - -@return - -@array-reply, specifically: - -The command returns data in different format depending on the way it is -called, as previously explained in this page. However the reply is always -an array of items. diff --git a/commands/xrange.md b/commands/xrange.md index fc6d11dd3c..a8d4e89b67 100644 --- a/commands/xrange.md +++ b/commands/xrange.md @@ -41,9 +41,8 @@ will just return every entry in the stream: ... other entries here ... ``` -The `-` ID is effectively just exactly as specifying `0-0`, while -`+` is equivalent to `18446744073709551615-18446744073709551615`, however -they are nicer to type. +The `-` and `+` special IDs mean, respectively, the minimal and maximal range IDs, +however they are nicer to type. 
## Incomplete IDs @@ -201,15 +200,6 @@ of XRANGE: For further information about Redis streams please check our [introduction to Redis Streams document](/topics/streams-intro). -@return - -@array-reply, specifically: - -The command returns the entries with IDs matching the specified range. -The returned entries are complete, that means that the ID and all the fields -they are composed are returned. Moreover, the entries are returned with -their fields and values in the exact same order as `XADD` added them. - @examples ```cli diff --git a/commands/xread.md b/commands/xread.md index ea0f311ecc..0a6741519f 100644 --- a/commands/xread.md +++ b/commands/xread.md @@ -197,19 +197,6 @@ are not removed from the stream when clients are served, so every client waiting will be served as soon as an `XADD` command provides data to the stream. -@return - -@array-reply, specifically: - -The command returns an array of results: each element of the returned -array is an array composed of a two element containing the key name and -the entries reported for that key. The entries reported are full stream -entries, having IDs and the list of all the fields and values. Field and -values are guaranteed to be reported in the same order they were added -by `XADD`. - -When **BLOCK** is used, on timeout a null reply is returned. - Reading the [Redis Streams introduction](/topics/streams-intro) is highly suggested in order to understand more about the streams overall behavior -and semantics. +and semantics. \ No newline at end of file diff --git a/commands/xreadgroup.md b/commands/xreadgroup.md index c597b68bb5..e94d2bb619 100644 --- a/commands/xreadgroup.md +++ b/commands/xreadgroup.md @@ -102,18 +102,32 @@ consumers that are processing new things. To see how the command actually replies, please check the `XREAD` command page. -@return +## What happens when a pending message is deleted? 
-@array-reply, specifically: +Entries may be deleted from the stream due to trimming or explicit calls to `XDEL` at any time. +By design, Redis doesn't prevent the deletion of entries that are present in the stream's PELs. +When this happens, the PELs retain the deleted entries' IDs, but the actual entry payload is no longer available. +Therefore, when reading such PEL entries, Redis will return a null value in place of their respective data. -The command returns an array of results: each element of the returned -array is an array composed of a two element containing the key name and -the entries reported for that key. The entries reported are full stream -entries, having IDs and the list of all the fields and values. Field and -values are guaranteed to be reported in the same order they were added -by `XADD`. +Example: -When **BLOCK** is used, on timeout a null reply is returned. +``` +> XADD mystream 1 myfield mydata +"1-0" +> XGROUP CREATE mystream mygroup 0 +OK +> XREADGROUP GROUP mygroup myconsumer STREAMS mystream > +1) 1) "mystream" + 2) 1) 1) "1-0" + 2) 1) "myfield" + 2) "mydata" +> XDEL mystream 1-0 +(integer) 1 +> XREADGROUP GROUP mygroup myconsumer STREAMS mystream 0 +1) 1) "mystream" + 2) 1) 1) "1-0" + 2) (nil) +``` Reading the [Redis Streams introduction](/topics/streams-intro) is highly suggested in order to understand more about the streams overall behavior diff --git a/commands/xrevrange.md b/commands/xrevrange.md index d61b3f5073..ab10209082 100644 --- a/commands/xrevrange.md +++ b/commands/xrevrange.md @@ -14,16 +14,6 @@ enough to send: XREVRANGE somestream + - COUNT 1 -@return - -@array-reply, specifically: - -The command returns the entries with IDs matching the specified range, -from the higher ID to the lower ID matching. -The returned entries are complete, that means that the ID and all the fields -they are composed are returned. Moreover the entries are returned with -their fields and values in the exact same order as `XADD` added them. 
- @examples ```cli diff --git a/commands/xtrim.md b/commands/xtrim.md index 08d55d5a9e..b26fec018e 100644 --- a/commands/xtrim.md +++ b/commands/xtrim.md @@ -44,10 +44,6 @@ When used, it specifies the maximal `count` of entries that will be evicted. When `LIMIT` and `count` aren't specified, the default value of 100 * the number of entries in a macro node will be implicitly used as the `count`. Specifying the value 0 as `count` disables the limiting mechanism entirely. -@return - -@integer-reply: The number of entries deleted from the stream. - @examples ```cli diff --git a/commands/zadd.md b/commands/zadd.md index eb77de6790..1f11c6ed28 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -58,17 +58,6 @@ The lexicographic ordering used is binary, it compares strings as array of bytes If the user inserts all the elements in a sorted set with the same score (for example 0), all the elements of the sorted set are sorted lexicographically, and range queries on elements are possible using the command `ZRANGEBYLEX` (Note: it is also possible to query sorted sets by range of scores using `ZRANGEBYSCORE`). -@return - -@integer-reply, specifically: - -* When used without optional arguments, the number of elements added to the sorted set (excluding score updates). -* If the `CH` option is specified, the number of elements that were changed (added or updated). - -If the `INCR` option is specified, the return value will be @bulk-string-reply: - -* The new score of `member` (a double precision floating point number) represented as string, or `nil` if the operation was aborted (when called with either the `XX` or the `NX` option). - @examples ```cli diff --git a/commands/zcard.md b/commands/zcard.md index 5ad504335d..bacabd2883 100644 --- a/commands/zcard.md +++ b/commands/zcard.md @@ -1,11 +1,6 @@ Returns the sorted set cardinality (number of elements) of the sorted set stored at `key`. 
-@return - -@integer-reply: the cardinality (number of elements) of the sorted set, or `0` -if `key` does not exist. - @examples ```cli diff --git a/commands/zcount.md b/commands/zcount.md index 82ce39b81c..a90d7c4b37 100644 --- a/commands/zcount.md +++ b/commands/zcount.md @@ -6,10 +6,6 @@ The `min` and `max` arguments have the same semantic as described for Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see `ZRANK`) to get an idea of the range. Because of this there is no need to do a work proportional to the size of the range. -@return - -@integer-reply: the number of elements in the specified score range. - @examples ```cli diff --git a/commands/zdiff.md b/commands/zdiff.md index d9449b7ef5..e2df52e424 100644 --- a/commands/zdiff.md +++ b/commands/zdiff.md @@ -1,11 +1,6 @@ This command is similar to `ZDIFFSTORE`, but instead of storing the resulting sorted set, it is returned to the client. -@return - -@array-reply: the result of the difference (optionally with their scores, in case -the `WITHSCORES` option is given). - @examples ```cli diff --git a/commands/zdiffstore.md b/commands/zdiffstore.md index abe3ba7e73..d9d2e2cac3 100644 --- a/commands/zdiffstore.md +++ b/commands/zdiffstore.md @@ -6,11 +6,6 @@ Keys that do not exist are considered to be empty sets. If `destination` already exists, it is overwritten. -@return - -@integer-reply: the number of elements in the resulting sorted set at -`destination`. - @examples ```cli diff --git a/commands/zincrby.md b/commands/zincrby.md index 0b8ccf0985..bbf48716b8 100644 --- a/commands/zincrby.md +++ b/commands/zincrby.md @@ -11,11 +11,6 @@ The `score` value should be the string representation of a numeric value, and accepts double precision floating point numbers. It is possible to provide a negative value to decrement the score. -@return - -@bulk-string-reply: the new score of `member` (a double precision floating point -number), represented as string. 
- @examples ```cli diff --git a/commands/zinter.md b/commands/zinter.md index 5a7adccd79..b796517797 100644 --- a/commands/zinter.md +++ b/commands/zinter.md @@ -3,11 +3,6 @@ sorted set, it is returned to the client. For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`. -@return - -@array-reply: the result of intersection (optionally with their scores, in case -the `WITHSCORES` option is given). - @examples ```cli diff --git a/commands/zintercard.md b/commands/zintercard.md index 613849fc2d..7ee7d1edeb 100644 --- a/commands/zintercard.md +++ b/commands/zintercard.md @@ -7,10 +7,6 @@ By default, the command calculates the cardinality of the intersection of all gi When provided with the optional `LIMIT` argument (which defaults to 0 and means unlimited), if the intersection cardinality reaches limit partway through the computation, the algorithm will exit and yield limit as the cardinality. Such implementation ensures a significant speedup for queries where the limit is lower than the actual intersection cardinality. -@return - -@integer-reply: the number of elements in the resulting intersection. - @examples ```cli diff --git a/commands/zinterstore.md b/commands/zinterstore.md index 0ecda0ddd7..1d386aa3a4 100644 --- a/commands/zinterstore.md +++ b/commands/zinterstore.md @@ -13,11 +13,6 @@ For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`. If `destination` already exists, it is overwritten. -@return - -@integer-reply: the number of elements in the resulting sorted set at -`destination`. - @examples ```cli diff --git a/commands/zlexcount.md b/commands/zlexcount.md index 15484f7973..2eeed5c27d 100644 --- a/commands/zlexcount.md +++ b/commands/zlexcount.md @@ -5,10 +5,6 @@ The `min` and `max` arguments have the same meaning as described for Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see `ZRANK`) to get an idea of the range. 
Because of this there is no need to do a work proportional to the size of the range. -@return - -@integer-reply: the number of elements in the specified score range. - @examples ```cli diff --git a/commands/zmpop.md b/commands/zmpop.md index 16848a0e08..e3fd6944b3 100644 --- a/commands/zmpop.md +++ b/commands/zmpop.md @@ -12,13 +12,6 @@ The optional `COUNT` can be used to specify the number of elements to pop, and i The number of popped elements is the minimum from the sorted set's cardinality and `COUNT`'s value. -@return - -@array-reply: specifically: - -* A `nil` when no element could be popped. -* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score. - @examples ```cli diff --git a/commands/zmscore.md b/commands/zmscore.md index c2317e90b8..0111a4a01b 100644 --- a/commands/zmscore.md +++ b/commands/zmscore.md @@ -2,11 +2,6 @@ Returns the scores associated with the specified `members` in the sorted set sto For every `member` that does not exist in the sorted set, a `nil` value is returned. -@return - -@array-reply: list of scores or `nil` associated with the specified `member` values (a double precision floating point number), -represented as strings. - @examples ```cli diff --git a/commands/zpopmax.md b/commands/zpopmax.md index 8f6750a75d..c0245f28b6 100644 --- a/commands/zpopmax.md +++ b/commands/zpopmax.md @@ -6,10 +6,6 @@ value that is higher than the sorted set's cardinality will not produce an error. When returning multiple elements, the one with the highest score will be the first, followed by the elements with lower scores. -@return - -@array-reply: list of popped elements and scores. 
- @examples ```cli diff --git a/commands/zpopmin.md b/commands/zpopmin.md index 16f7c97ac1..2214e57dd6 100644 --- a/commands/zpopmin.md +++ b/commands/zpopmin.md @@ -6,10 +6,6 @@ value that is higher than the sorted set's cardinality will not produce an error. When returning multiple elements, the one with the lowest score will be the first, followed by the elements with greater scores. -@return - -@array-reply: list of popped elements and scores. - @examples ```cli diff --git a/commands/zrandmember.md b/commands/zrandmember.md index aae0b25434..d1f6ed983e 100644 --- a/commands/zrandmember.md +++ b/commands/zrandmember.md @@ -8,13 +8,6 @@ In this case, the number of returned elements is the absolute value of the speci The optional `WITHSCORES` modifier changes the reply so it includes the respective scores of the randomly selected elements from the sorted set. -@return - -@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. - -@array-reply: when the additional `count` argument is passed, the command returns an array of elements, or an empty array when `key` does not exist. -If the `WITHSCORES` modifier is used, the reply is a list elements and their scores from the sorted set. - @examples ```cli diff --git a/commands/zrange.md b/commands/zrange.md index 1a8e4215a4..a928689d71 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -100,17 +100,10 @@ Because of the first *normalized* part in every element (before the colon charac The binary nature of the comparison allows to use sorted sets as a general purpose index, for example, the first part of the element can be a 64-bit big-endian number. Since big-endian numbers have the most significant bytes in the initial positions, the binary comparison will match the numerical comparison of the numbers. This can be used in order to implement range queries on 64-bit values. 
As in the example below, after the first 8 bytes, we can store the value of the element we are indexing. -@return - -@array-reply: list of elements in the specified range (optionally with -their scores, in case the `WITHSCORES` option is given). - @examples ```cli -ZADD myzset 1 "one" -ZADD myzset 2 "two" -ZADD myzset 3 "three" +ZADD myzset 1 "one" 2 "two" 3 "three" ZRANGE myzset 0 -1 ZRANGE myzset 2 3 ZRANGE myzset -2 -1 @@ -119,11 +112,13 @@ ZRANGE myzset -2 -1 The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_1*, *element_2*, *score_2*, ..., *element_N*, *score_N*. ```cli +ZADD myzset 1 "one" 2 "two" 3 "three" ZRANGE myzset 0 1 WITHSCORES ``` This example shows how to query the sorted set by score, excluding the value `1` and up to infinity, returning only the second element of the result: ```cli +ZADD myzset 1 "one" 2 "two" 3 "three" ZRANGE myzset (1 +inf BYSCORE LIMIT 1 1 -``` \ No newline at end of file +``` diff --git a/commands/zrangebylex.md b/commands/zrangebylex.md index 4eefffc05a..b4663f674b 100644 --- a/commands/zrangebylex.md +++ b/commands/zrangebylex.md @@ -47,10 +47,6 @@ comparison of the numbers. This can be used in order to implement range queries on 64 bit values. As in the example below, after the first 8 bytes we can store the value of the element we are actually indexing. -@return - -@array-reply: list of elements in the specified score range. - @examples ```cli diff --git a/commands/zrangebyscore.md b/commands/zrangebyscore.md index bc81708533..5c4ea3a9fc 100644 --- a/commands/zrangebyscore.md +++ b/commands/zrangebyscore.md @@ -40,11 +40,6 @@ ZRANGEBYSCORE zset (5 (10 Will return all the elements with `5 < score < 10` (5 and 10 excluded). -@return - -@array-reply: list of elements in the specified score range (optionally -with their scores). 
- @examples ```cli diff --git a/commands/zrangestore.md b/commands/zrangestore.md index 8dc744c3e3..2a0bbfc9d9 100644 --- a/commands/zrangestore.md +++ b/commands/zrangestore.md @@ -1,9 +1,5 @@ This command is like `ZRANGE`, but stores the result in the `` destination key. -@return - -@integer-reply: the number of elements in the resulting sorted set. - @examples ```cli diff --git a/commands/zrank.md b/commands/zrank.md index 1419adf82a..8436883cd0 100644 --- a/commands/zrank.md +++ b/commands/zrank.md @@ -3,15 +3,11 @@ ordered from low to high. The rank (or index) is 0-based, which means that the member with the lowest score has rank `0`. +The optional `WITHSCORE` argument supplements the command's reply with the score of the element returned. + Use `ZREVRANK` to get the rank of an element with the scores ordered from high to low. -@return - -* If `member` exists in the sorted set, @integer-reply: the rank of `member`. -* If `member` does not exist in the sorted set or `key` does not exist, - @bulk-string-reply: `nil`. - @examples ```cli @@ -20,4 +16,6 @@ ZADD myzset 2 "two" ZADD myzset 3 "three" ZRANK myzset "three" ZRANK myzset "four" +ZRANK myzset "three" WITHSCORE +ZRANK myzset "four" WITHSCORE ``` diff --git a/commands/zrem.md b/commands/zrem.md index d97fd4ba94..642e2874bc 100644 --- a/commands/zrem.md +++ b/commands/zrem.md @@ -3,13 +3,6 @@ Non existing members are ignored. An error is returned when `key` exists and does not hold a sorted set. -@return - -@integer-reply, specifically: - -* The number of members removed from the sorted set, not including non existing - members. - @examples ```cli diff --git a/commands/zremrangebylex.md b/commands/zremrangebylex.md index 4264f1b072..83df974cdd 100644 --- a/commands/zremrangebylex.md +++ b/commands/zremrangebylex.md @@ -2,10 +2,6 @@ When all the elements in a sorted set are inserted with the same score, in order The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command. 
Similarly, this command actually removes the same elements that `ZRANGEBYLEX` would return if called with the same `min` and `max` arguments. -@return - -@integer-reply: the number of elements removed. - @examples ```cli diff --git a/commands/zremrangebyrank.md b/commands/zremrangebyrank.md index edd3cf39a5..30a068b673 100644 --- a/commands/zremrangebyrank.md +++ b/commands/zremrangebyrank.md @@ -7,10 +7,6 @@ the element with the highest score. For example: `-1` is the element with the highest score, `-2` the element with the second highest score and so forth. -@return - -@integer-reply: the number of elements removed. - @examples ```cli diff --git a/commands/zremrangebyscore.md b/commands/zremrangebyscore.md index fdf9a9869c..839b17b3c7 100644 --- a/commands/zremrangebyscore.md +++ b/commands/zremrangebyscore.md @@ -1,10 +1,6 @@ Removes all elements in the sorted set stored at `key` with a score between `min` and `max` (inclusive). -@return - -@integer-reply: the number of elements removed. - @examples ```cli diff --git a/commands/zrevrange.md b/commands/zrevrange.md index 3a19810c94..2a36390456 100644 --- a/commands/zrevrange.md +++ b/commands/zrevrange.md @@ -4,11 +4,6 @@ Descending lexicographical order is used for elements with equal score. Apart from the reversed ordering, `ZREVRANGE` is similar to `ZRANGE`. -@return - -@array-reply: list of elements in the specified range (optionally with -their scores). - @examples ```cli diff --git a/commands/zrevrangebylex.md b/commands/zrevrangebylex.md index c6772c9128..eb9ad8f436 100644 --- a/commands/zrevrangebylex.md +++ b/commands/zrevrangebylex.md @@ -2,10 +2,6 @@ When all the elements in a sorted set are inserted with the same score, in order Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to `ZRANGEBYLEX`. -@return - -@array-reply: list of elements in the specified score range. 
- @examples ```cli diff --git a/commands/zrevrangebyscore.md b/commands/zrevrangebyscore.md index e95d771bb3..c6e2e3e537 100644 --- a/commands/zrevrangebyscore.md +++ b/commands/zrevrangebyscore.md @@ -9,11 +9,6 @@ order. Apart from the reversed ordering, `ZREVRANGEBYSCORE` is similar to `ZRANGEBYSCORE`. -@return - -@array-reply: list of elements in the specified score range (optionally -with their scores). - @examples ```cli diff --git a/commands/zrevrank.md b/commands/zrevrank.md index 6c64d98734..c79868920b 100644 --- a/commands/zrevrank.md +++ b/commands/zrevrank.md @@ -3,15 +3,11 @@ ordered from high to low. The rank (or index) is 0-based, which means that the member with the highest score has rank `0`. +The optional `WITHSCORE` argument supplements the command's reply with the score of the element returned. + Use `ZRANK` to get the rank of an element with the scores ordered from low to high. -@return - -* If `member` exists in the sorted set, @integer-reply: the rank of `member`. -* If `member` does not exist in the sorted set or `key` does not exist, - @bulk-string-reply: `nil`. - @examples ```cli @@ -20,4 +16,6 @@ ZADD myzset 2 "two" ZADD myzset 3 "three" ZREVRANK myzset "one" ZREVRANK myzset "four" +ZREVRANK myzset "three" WITHSCORE +ZREVRANK myzset "four" WITHSCORE ``` diff --git a/commands/zscore.md b/commands/zscore.md index 8b1e74dd88..324019d940 100644 --- a/commands/zscore.md +++ b/commands/zscore.md @@ -3,11 +3,6 @@ Returns the score of `member` in the sorted set at `key`. If `member` does not exist in the sorted set, or `key` does not exist, `nil` is returned. -@return - -@bulk-string-reply: the score of `member` (a double precision floating point number), -represented as string. - @examples ```cli diff --git a/commands/zunion.md b/commands/zunion.md index d77d81f47c..f85bfd821a 100644 --- a/commands/zunion.md +++ b/commands/zunion.md @@ -3,11 +3,6 @@ sorted set, it is returned to the client. 
For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`. -@return - -@array-reply: the result of union (optionally with their scores, in case -the `WITHSCORES` option is given). - @examples ```cli diff --git a/commands/zunionstore.md b/commands/zunionstore.md index 49e2d506e9..efa2fbba55 100644 --- a/commands/zunionstore.md +++ b/commands/zunionstore.md @@ -21,11 +21,6 @@ the minimum or maximum score of an element across the inputs where it exists. If `destination` already exists, it is overwritten. -@return - -@integer-reply: the number of elements in the resulting sorted set at -`destination`. - @examples ```cli diff --git a/community/index.md b/community/_index.md similarity index 54% rename from community/index.md rename to community/_index.md index 659e24b9c3..b67d2a2aaf 100644 --- a/community/index.md +++ b/community/_index.md @@ -3,11 +3,11 @@ title: Community linkTitle: Community --- -Since 2009, the Redis open source project has inspired an enthusiastic and active community of users and contributors. We continue to be committed to fostering an open, welcoming, diverse, inclusive, and healthy community. +Since 2009, the Redis project has inspired an enthusiastic and active community of users and contributors. We continue to be committed to fostering an open, welcoming, diverse, inclusive, and healthy community. ## Code of Conduct -Redis has adopted the [Contributor Covenant Code of Conduct](https://github.com/redis/redis/blob/unstable/CONDUCT). +Redis has adopted the [Contributor Covenant Code of Conduct](https://github.com/redis/redis/blob/unstable/CODE_OF_CONDUCT.md). ## Getting help @@ -29,31 +29,15 @@ For occasional updates on the new Redis releases, you can either [subscribe to t To keep up with the latest from Redis Inc., including news on Redis Cloud and Redis Stack, consider [following the Redis Twitter feed](https://twitter.com/redisinc). 
-## Project governance - -Redis has adopted a [light governance model](/docs/about/governance) led by individuals who have made significant contributions to Redis and demonstrated a long-term commitment to the project. - -Learn more about the project's governance and the Redis Core Team on the [Redis governance page](/docs/about/governance). - -## Conferences and meetups - -Redis regularly sponsors conferences and meetups. Recent conferences include: - -* [Redis Days 2022](https://redis.com/redisdays/) - -* [RedisConf 2021](https://redis.com/redisconf/) - -* [RedisConf 2020](https://www.youtube.com/c/Redisinc/playlists?view=50&sort=dd&shelf_id=4) - -If you're looking for a Redis meetup, check out [Redis Live](https://meetups.redis.com/redis-live/). This virtual, online meetup gets together each month to highlight members of the Redis community. - ## Contributing to Redis -There are many ways to contribute to Redis, starting with documentation all the way to changes to the open source Redis server. Here are a few ways you can get involved. +> Future releases of Redis will be dual-licensed under a source-available license. You can choose between the [Redis Source Available License 2.0 (RSALv2)](/docs/about/license) or the Server Side Public License v1 (SSPLv1). + +There are many ways to contribute to Redis, starting with documentation all the way to changes to the Redis server. Here are a few ways you can get involved. ### Contributing to docs -The [Redis docs](https://github.com/redis/redis-doc) are open source, and we'd love to incorporate your contributions. For small changes and typos, we recommend creating a pull request against [redis-doc repo](https://github.com/redis/redis-doc/pulls). +We welcome contributions to the [Redis docs](https://github.com/redis/redis-doc). For small changes and typos, we recommend creating a pull request against [redis-doc repo](https://github.com/redis/redis-doc/pulls). 
### Reporting bugs @@ -63,4 +47,4 @@ For larger doc changes, we ask that you first create an issue describing your pr ### Client libraries -The Redis client libraries are nearly always open source and accepting of contributions. Consult the contribution guidelines for the library you're interested in. +The Redis client libraries are nearly always open source and accepting of contributions. Consult the contribution guidelines for the library you're interested in. \ No newline at end of file diff --git a/docs/about/_index.md b/docs/about/_index.md index fa840d2b6e..56dc574a12 100644 --- a/docs/about/_index.md +++ b/docs/about/_index.md @@ -1,22 +1,25 @@ --- title: Introduction to Redis linkTitle: "About" -weight: 1 -description: Learn about the Redis open source project +weight: 10 +description: Learn about Redis aliases: - /topics/introduction - /buzz --- -Redis is an open source (BSD licensed), in-memory **data structure store** used as a database, cache, message broker, and streaming engine. Redis provides data structures such as -[strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps), [hyperloglogs](/topics/data-types-intro#hyperloglogs), [geospatial indexes](/commands/geoadd), and [streams](/topics/streams-intro). Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions), and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). +Redis is an open source (BSD licensed), in-memory __data structure store__ used as a database, cache, message broker, and streaming engine. 
-You can run **atomic operations** +> Future releases of Redis will be dual-licensed under a source-available license. You can choose between the [Redis Source Available License 2.0 (RSALv2)](/docs/about/license) or the Server Side Public License v1 (SSPLv1). + +Redis provides [data structures](/docs/data-types/) such as [strings](/docs/data-types/strings/), [hashes](/docs/data-types/hashes/), [lists](/docs/data-types/lists/), [sets](/docs/data-types/sets/), [sorted sets](/docs/data-types/sorted-sets/) with range queries, [bitmaps](/docs/data-types/bitmaps/), [hyperloglogs](/docs/data-types/hyperloglogs/), [geospatial indexes](/docs/data-types/geospatial/), and [streams](/docs/data-types/streams/). Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/docs/reference/eviction/), [transactions](/topics/transactions), and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). + +You can run __atomic operations__ on these types, like [appending to a string](/commands/append); [incrementing the value in a hash](/commands/hincrby); [pushing an element to a list](/commands/lpush); [computing set intersection](/commands/sinter), [union](/commands/sunion) and [difference](/commands/sdiff); -or [getting the member with highest ranking in a sorted set](/commands/zrangebyscore). +or [getting the member with highest ranking in a sorted set](/commands/zrange). To achieve top performance, Redis works with an **in-memory dataset**. 
Depending on your use case, Redis can persist your data either @@ -31,7 +34,7 @@ Redis also includes: * [Pub/Sub](/topics/pubsub) * [Lua scripting](/commands/eval) * [Keys with a limited time-to-live](/commands/expire) -* [LRU eviction of keys](/topics/lru-cache) +* [LRU eviction of keys](/docs/reference/eviction) * [Automatic failover](/topics/sentinel) You can use Redis from [most programming languages](/clients). @@ -39,3 +42,5 @@ You can use Redis from [most programming languages](/clients). Redis is written in **ANSI C** and works on most POSIX systems like Linux, \*BSD, and Mac OS X, without external dependencies. Linux and OS X are the two operating systems where Redis is developed and tested the most, and we **recommend using Linux for deployment**. Redis may work in Solaris-derived systems like SmartOS, but support is *best effort*. There is no official support for Windows builds. + +
diff --git a/docs/about/governance.md b/docs/about/governance.md deleted file mode 100644 index 8c10badf77..0000000000 --- a/docs/about/governance.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Redis open source governance" -linkTitle: "Governance" -weight: 3 -description: > - Governance model for the Redis open source project -aliases: - - /topics/governance ---- - -From 2009-2020, Salvatore Sanfilippo built, led, and maintained the Redis open source project. During this time, Redis had no formal governance structure, operating primarily as a [BDFL](https://en.wikipedia.org/wiki/Benevolent_dictator_for_life)-style project. - -As Redis grew, matured, and expanded its user base, it became increasingly important to form a sustainable structure for its ongoing development and maintenance. Salvatore and the core Redis contributors wanted to ensure the project’s continuity and reflect its larger community. With this in mind, a new governance structure was adopted. - -## Current governance structure - -Starting on June 30, 2020, Redis adopted a _light governance_ model that matches the current size of the project and minimizes the changes from its earlier model. The governance model is intended to be a meritocracy, aiming to empower individuals who demonstrate a long-term commitment and make significant contributions. - -## The Redis core team - -Salvatore Sanfilippo named two successors to take over and lead the Redis project: Yossi Gottlieb ([yossigo](https://github.com/yossigo)) and Oran Agra ([oranagra](https://github.com/oranagra)) - -With the backing and blessing of Redis Ltd., we took this opportunity to create a more open, scalable, and community-driven “core team” structure to run the project. The core team consists of members selected based on demonstrated, long-term personal involvement and contributions. 
- -The current core team members are: - -* Project Lead: Yossi Gottlieb ([yossigo](https://github.com/yossigo)) from Redis Ltd. -* Project Lead: Oran Agra ([oranagra](https://github.com/oranagra)) from Redis Ltd. -* Community Lead: Itamar Haber ([itamarhaber](https://github.com/itamarhaber)) from Redis Ltd. -* Member: Zhao Zhao ([soloestoy](https://github.com/soloestoy)) from Alibaba -* Member: Madelyn Olson ([madolson](https://github.com/madolson)) from Amazon Web Services - -The Redis core team members serve the Redis open source project and community. They are expected to set a good example of behavior, culture, and tone in accordance with the adopted [Code of Conduct](https://www.contributor-covenant.org/). They should also consider and act upon the best interests of the project and the community in a way that is free from foreign or conflicting interests. - -The core team will be responsible for the Redis core project, which is the part of Redis that is hosted in the main Redis repository and is BSD licensed. It will also aim to maintain coordination and collaboration with other projects that make up the Redis ecosystem, including Redis clients, satellite projects, major middleware that relies on Redis, etc. - -#### Roles and responsibilities - -The core team has the following remit: - -* Managing the core Redis code and documentation -* Managing new Redis releases -* Maintaining a high-level technical direction/roadmap -* Providing a fast response, including fixes/patches, to address security vulnerabilities and other major issues -* Project governance decisions and changes -* Coordination of Redis core with the rest of the Redis ecosystem -* Managing the membership of the core team - -The core team aims to form and empower a community of contributors by further delegating tasks to individuals who demonstrate commitment, know-how, and skills. 
In particular, we hope to see greater community involvement in the following areas: - -* Support, troubleshooting, and bug fixes of reported issues -* Triage of contributions/pull requests - -#### Decision making - -* **Normal decisions** will be made by core team members based on a lazy consensus approach: each member may vote +1 (positive) or -1 (negative). A negative vote must include thorough reasoning and better yet, an alternative proposal. The core team will always attempt to reach a full consensus rather than a majority. Examples of normal decisions: - * Day-to-day approval of pull requests and closing issues - * Opening new issues for discussion -* **Major decisions** that have a significant impact on the Redis architecture, design, or philosophy as well as core-team structure or membership changes should preferably be determined by full consensus. If the team is not able to achieve a full consensus, a majority vote is required. Examples of major decisions: - * Fundamental changes to the Redis core - * Adding a new data structure - * Creating a new version of RESP (Redis Serialization Protocol) - * Changes that affect backward compatibility - * Adding or changing core team members -* Project leads have a right to veto major decisions - -#### Core team membership - -* The core team is not expected to serve for life, however, long-term participation is desired to provide stability and consistency in the Redis programming style and the community. -* If a core-team member whose work is funded by Redis Ltd. must be replaced, the replacement will be designated by Redis Ltd. after consultation with the remaining core-team members. -* If a core-team member not funded by Redis Ltd. will no longer participate, for whatever reason, the other team members will select a replacement. - -## Community forums and communications - -We want the Redis community to be as welcoming and inclusive as possible. 
To that end, we have adopted a [Code of Conduct](https://www.contributor-covenant.org/) that we ask all community members to read and observe. - -We encourage that all significant communications will be public, asynchronous, archived, and open for the community to actively participate in using the channels described [here](https://redis.io/community). The exception to that is sensitive security issues that require resolution prior to public disclosure. - -To contact the core team about sensitive matters, such as misconduct or security issues, please email [redis@redis.io](mailto:redis@redis.io). - -## New Redis repository and commits approval process - -The Redis core source repository is hosted under [https://github.com/redis/redis](https://github.com/redis/redis). Our target is to eventually host everything (the Redis core source and other ecosystem projects) under the Redis GitHub organization ([https://github.com/redis](https://github.com/redis)). Commits to the Redis source repository will require code review, approval of at least one core-team member who is not the author of the commit, and no objections. - -## Project and development updates - -Stay connected to the project and the community! For project and community updates, follow the project [channels](https://redis.io/community). Development announcements will be made via [the Redis mailing list](https://groups.google.com/forum/#!forum/redis-db). - -## Updates to these governance rules - -Any substantial changes to these rules will be treated as a major decision. Minor changes or ministerial corrections will be treated as normal decisions. 
diff --git a/docs/about/license.md b/docs/about/license.md index 7d5e4ea8a5..6f78dd1f2a 100644 --- a/docs/about/license.md +++ b/docs/about/license.md @@ -6,56 +6,90 @@ description: > Redis license and trademark information aliases: - /topics/license + - /docs/stack/license/ --- -Redis is **open source software** released under the terms of the **three clause BSD license**. Most of the Redis source code was written and is copyrighted by Salvatore Sanfilippo and Pieter Noordhuis. A list of other contributors can be found in the git history. -The Redis trademark and logo are owned by Redis Ltd. and can be -used in accordance with the [Redis Trademark Guidelines](/docs/about/trademark). +* Redis is source-available software, available under the terms of the RSALv2 and SSPLv1 licenses. Most of the Redis source code was written and is copyrighted by Salvatore Sanfilippo and Pieter Noordhuis. A list of other contributors can be found in the git history. -## Three clause BSD license + The Redis trademark and logo are owned by Redis Ltd. and can be +used in accordance with the [Redis Trademark Guidelines](https://redis.com/legal/trademark-guidelines/). -Every file in the Redis distribution, with the exceptions of third party files specified in the list below, contain the following license: +* RedisInsight is licensed under the Server Side Public License (SSPL). -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +* Redis Stack Server, which combines open source Redis with Search and Query features, JSON, Time Series, and Probabilistic data structures is dual-licensed under the Redis Source Available License (RSALv2), as described below, and the [Server Side Public License](https://redis.com/legal/server-side-public-license-sspl/) (SSPL). For information about licensing per version, see [Versions and licenses](/docs/about/about-stack/#redis-stack-license). 
-* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. +## Licenses: -* Neither the name of Redis nor the names of its contributors may be used - to endorse or promote products derived from this software without - specific prior written permission. +### REDIS SOURCE AVAILABLE LICENSE (RSAL) 2.0 -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. +Last updated: November 15, 2022 -## Third-party files and licenses +#### Acceptance -Redis uses source code from third parties. All this code contains a BSD or BSD-compatible license. The following is a list of third-party files and information about their copyright. +By using the software, you agree to all of the terms and conditions below. + +#### Copyright License + +The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below. 
+ +#### Limitations + +You may not make the functionality of the software or a modified version available to third parties as a service, or distribute the software or a modified version in a manner that makes the functionality of the software available to third parties. Making the functionality of the software or modified version available to third parties includes, without limitation, enabling third parties to interact with the functionality of the software or modified version in distributed form or remotely through a computer network, offering a product or service the value of which entirely or primarily derives from the value of the software or modified version, or offering a product or service that accomplishes for users the primary purpose of the software or modified version. + +You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law. + +#### Patents + +The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company. + +#### Notices + +You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms. If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software. 
+ +#### No Other Rights + +These terms do not imply any licenses other than those expressly granted in these terms. + +#### Termination + +If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violations of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently. -* Redis uses the [LHF compression library](http://oldhome.schmorp.de/marc/liblzf.html). LibLZF is copyright Marc Alexander Lehmann and is released under the terms of the **two-clause BSD license**. +#### No Liability + +As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim. + +#### Definitions + +The licensor is the entity offering these terms, and the software is the software the licensor makes available under these terms, including any portion of it. + +To modify a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission other than making an exact copy. The resulting work is called a modified version of the earlier work. + +**you** refers to the individual or entity agreeing to these terms. + +**your company** is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. + +**control** means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. 
Control can be direct or indirect. + +**your licenses** are all the licenses granted to you for the software under these terms. + +**use** means anything you do with the software requiring one of your licenses. + +**trademark** means trademarks, service marks, and similar rights. + +#### Third-party files and licenses + +Redis uses source code from third parties. All this code contains a BSD or BSD-compatible license. The following is a list of third-party files and information about their copyright. -* Redis uses the `sha1.c` file that is copyright by Steve Reid and released under the **public domain**. This file is extremely popular and used among open source and proprietary code. +* Redis uses the [LHF compression library](http://oldhome.schmorp.de/marc/liblzf.html). LibLZF is copyright Marc Alexander Lehmann and is released under the terms of the two-clause BSD license. -* When compiled on Linux, Redis uses the [Jemalloc allocator](https://github.com/jemalloc/jemalloc), which is copyrighted by Jason Evans, Mozilla Foundation, and Facebook, Inc and released under the **two-clause BSD license**. +* Redis uses the sha1.c file that is copyright by Steve Reid and released under the public domain. This file is extremely popular and used among open source and proprietary code. -* Inside Jemalloc, the file `pprof` is copyrighted by Google Inc. and released under the **three-clause BSD license**. +* When compiled on Linux, Redis uses the [Jemalloc allocator](https://github.com/jemalloc/jemalloc), which is copyrighted by Jason Evans, Mozilla Foundation, and Facebook, Inc and released under the two-clause BSD license. -* Inside Jemalloc the files `inttypes.h`, `stdbool.h`, `stdint.h`, `strings.h` under the `msvc_compat` directory are copyright Alexander Chemeris and released under the **three-clause BSD license**. +* Inside Jemalloc, the file pprof is copyrighted by Google Inc. and released under the three-clause BSD license. 
-* The libraries **hiredis** and **linenoise** also included inside the Redis distribution are copyright Salvatore Sanfilippo and Pieter Noordhuis and released under the terms respectively of the **three-clause BSD license** and **two-clause BSD license**. +* Inside Jemalloc the files inttypes.h, stdbool.h, stdint.h, strings.h under the msvc_compat directory are copyright Alexander Chemeris and released under the three-clause BSD license. +* The libraries hiredis and linenoise also included inside the Redis distribution are copyright Salvatore Sanfilippo and Pieter Noordhuis and released under the terms respectively of the three-clause BSD license and two-clause BSD license. \ No newline at end of file diff --git a/docs/about/sponsors.md b/docs/about/sponsors.md deleted file mode 100644 index 8c9180a783..0000000000 --- a/docs/about/sponsors.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Redis sponsors" -linkTitle: "Sponsors" -weight: 4 -description: Current and former Redis sponsors -aliases: - - /topics/sponsors ---- - -From 2015 to 2020, Salvatore Sanfilippo's work on Redis was sponsored by [Redis Ltd.](https://redis.com) Since June 2020, Redis Ltd. has sponsored the [governance of Redis](/topics/governance). Redis Ltd. also sponsors the hosting and maintenance of [redis.io](https://redis.io). - -Past sponsorships: - -* The [Shuttleworth Foundation](http://www.shuttleworthfoundation.org) has donated 5000 USD to the Redis project in form of a flash grant. -* From May 2013 to June 2015, the work [Salvatore Sanfilippo](http://twitter.com/antirez) did to develop Redis was sponsored by [Pivotal](http://gopivotal.com). -* Before May 2013, the project was sponsored by VMware with the work of [Salvatore Sanfilippo](http://twitter.com/antirez) and [Pieter Noordhuis](http://twitter.com/pnoordhuis). -* [VMware](http://vmware.com) and later [Pivotal](http://pivotal.io) provided a 24 GB RAM workstation for Salvatore to run the Redis CI test and other long running tests. 
Later, Salvatore equipped the server with an SSD drive in order to test in the same hardware with rotating and flash drives. -* [Linode](https://linode.com), in January 2010, provided virtual machines for Redis testing in a virtualized environment. -* Slicehost, January 2010, provided Virtual Machines for Redis testing in a virtualized environment. -* [Citrusbyte](http://citrusbyte.com), in December 2009, contributed part of Virtual Memory implementation. -* [Hitmeister](http://www.hitmeister.de/), in December 2009, contributed part of Redis Cluster. -* [Engine Yard](http://engineyard.com), in December 2009, contributed blocking POP (BLPOP) and part of the Virtual Memory implementation. - -Also thanks to the following people or organizations that donated to the Project: - -* Emil Vladev -* [Brad Jasper](http://bradjasper.com/) -* [Mrkris](http://mrkris.com/) - -The Redis community is grateful to [Redis Ltd.](http://redis.com), [Pivotal](http://gopivotal.com), [VMware](http://vmware.com) and to the other companies and people who have donated to the Redis project. Thank you. - -## redis.io - -[Citrusbyte](https://citrusbyte.com) sponsored the creation of the official -Redis logo (designed by Carlos Prioglio) and -transferred its copyright to Salvatore Sanfilippo. - -They also sponsored the initial implementation of this site by -[Damian Janowski](https://twitter.com/djanowski) and [Michel -Martens](https://twitter.com/soveran). - -The `redis.io` domain was donated for a few years to the project by [I Want My -Name](https://iwantmyname.com). diff --git a/docs/about/trademark.md b/docs/about/trademark.md deleted file mode 100644 index e57d6832ed..0000000000 --- a/docs/about/trademark.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Redis trademark guidelines" -linkTitle: "Trademark" -weight: 6 -description: How can the Redis trademarks be used? -aliases: - - /topics/trademark ---- - -1. **OPEN SOURCE LICENSE VS. 
TRADEMARKS.** The three-clause BSD license gives you the right to redistribute and use the software in source and binary forms, with or without modification, under certain conditions. However, open source licenses like the three-clause BSD license do not address trademarks. Redis trademarks and brands need to be used in a way consistent with trademark law, and that is why we have prepared this policy – to help you understand what branding is allowed or required when using our software. - -2. **PURPOSE**. To outline the policy and guidelines for using the Redis trademark (“Mark”) and logo (“Logo”). - -3. **WHY IS THIS IMPORTANT?** The Mark and Logo are symbols of the quality and community support associated with the open source Redis. Trademarks protect not only its owners, but its users and the entire open source community. Our community members need to know that they can rely on the quality represented by the brand. No one should use the Mark or Logo in any way that misleads anyone, either directly or by omission, or in any way that is likely to confuse or take advantage of the community, or constitutes unfair competition. For example, you cannot say you are distributing Redis software when you are distributing a modified version of Redis open source, because people will be confused when they are not getting the same features and functionality they would get if they downloaded the software directly from Redis, or will think that the modified software is endorsed or sponsored by us or the community. You also cannot use the Mark or Logo on your website or in connection with any services in a way that suggests that your website is an official Redis website or service, or that suggests that we endorse your website or services. - -4. **PROPER USE OF THE REDIS TRADEMARKS AND LOGO.** You may do any of the following: - * a. 
When you use an unaltered, unmodified copy of open source Redis downloaded from https://redis.io (the “Software”) as a data source for your application, you may use the Mark and Logo to identify your use. The open source Redis software combined with, or integrated into, any other software program, including but not limited to automation software for offering Redis as a cloud service or orchestration software for offering Redis in containers is considered “modified” Redis software and does not entitle you to use the Mark or the Logo, except in a case of nominative use, as described below. Integrating the Software with other software or service can introduce performance or quality control problems that can devalue the goodwill in the Redis brand and we want to be sure that such problems do not confuse users as to the quality of the product. - * b. The Software is developed by and for the Redis community. If you are engaged in community advocacy, you can use the Mark but not the Logo in the context of showing support for the open source Redis project, provided that: - * i. The Mark is used in a manner consistent with this policy; - * ii. There is no commercial purpose behind the use and you are not offering Redis commercially under the same domain name; - * iii. There is no suggestion that you are the creator or source of Redis, or that your project is approved, sponsored, or affiliated with us or the community; and - * iv. You must include attribution according to section 6.a. below. - * c. __Nominative Use__: Trademark law permits third parties the use of a mark to identify the trademark holder’s product or service so long as such use is not likely to cause unnecessary consumer or public confusion. This is referred to as a nominative or fair use. 
When you distribute, or offer an altered, modified or combined copy of the Software, such as in the case of a cloud service or a container service, you may engage in “nominative use” of the Mark, but this does not allow you to use the Logo. - * d. Examples of Nominative Use: - * i. Offering an XYZ software, which is an altered, modified or combined copy of the open source Redis software, including but not limited to offering Redis as a cloud service or as a container service, and while fully complying with the open source Redis API - you may only name it **"XYZ for Redis®"** or state that **"XYZ software is compatible with the Redis® API"**. No other term or description of your software is allowed. - * ii. Offering an ABC application, which uses an altered, modified or combined copy of the open source Redis software as a data source, including but not limited to using Redis as a cloud service or a container service, and while the modified Redis fully complies with the open source Redis API - you may only state that **"ABC application is using XYZ for Redis®"**, or **"ABC application is using a software which is compatible with the Redis® API"**. No other term or description of your application is allowed. - * iii. If, however, the offered XYZ software, or service based thereof, or application ABC uses an altered, modified or combined copy of the open source Redis software that does not fully comply with the open source Redis API - you may not use the Mark and Logo at all. - * e. In any use (or nominative use) of the Mark or the Logo as per the above, you should comply with all the provisions of Section 6 (General Use). -5. **IMPROPER USE OF THE REDIS TRADEMARKS AND LOGOS**. Any use of the Mark -or Logo other than as expressly described as permitted herein is not permitted because we believe that it would likely cause impermissible public confusion. Use of the Mark that we will likely consider infringing without permission for use include: - * a. Entity Names. 
You may not form a company, use a company name, or create a software product or service name that includes the Mark or implies any that such company is the source or sponsor of Redis. If you wish to form an entity for a user or developer group, please contact us and we will be glad to discuss a license for a suitable name; - * b. Class or Quality. You may not imply that you are providing a class or quality of Redis (e.g., "enterprise-class" or "commercial quality" or “fully managed”) in a way that implies Redis is not of that class, grade or quality, nor that other parties are not of that class, grade, or quality; - * c. False or Misleading Statements. You may not make false or misleading statements regarding your use of Redis (e.g., "we wrote the majority of the code" or "we are major contributors" or "we are committers"); - * d. Domain Names and Subdomains. You must not use Redis or any confusingly similar phrase in a domain name or subdomain. For instance “www.Redishost.com” is not allowed. If you wish to use such a domain name for a user or developer group, please contact us and we will be glad to discuss a license for a suitable domain name. Because of the many persons who, unfortunately, seek to spoof, swindle or deceive the community by using confusing domain names, we must be very strict about this rule; - * e. Websites. You must not use our Mark or Logo on your website in a way that suggests that your website is an official website or that we endorse your website; - * f. Merchandise. You must not manufacture, sell or give away merchandise items, such as T-shirts and mugs, bearing the Mark or Logo, or create any mascot for Redis. If you wish to use the Mark or Logo for a user or developer group, please contact us and we will be glad to discuss a license to do this; - * g. Variations, takeoffs or abbreviations. You may not use a variation of the Mark for any purpose. For example, the following are not acceptable: - * i. Red; - * ii. MyRedis; and - * iii. 
RedisHost. - * h. Rebranding. You may not change the Mark or Logo on a redistributed (unmodified) Software to your own brand or logo. You may not hold yourself out as the source of the Redis software, except to the extent you have modified it as allowed under the three-clause BSD license, and you make it clear that you are the source only of the modification; - * i. Combination Marks. Do not use our Mark or Logo in combination with any other marks or logos. For example Foobar Redis, or the name of your company or product typeset to look like the Redis logo; and - * j. Web Tags. Do not use the Mark in a title or metatag of a web page to influence search engine rankings or result listings, rather than for discussion or advocacy of the Redis project. -6. **GENERAL USE INFORMATION.** - * a. Attribution. Any permitted use of the Mark or Logo, as indicated above, should comply with the following provisions: - * i. You should add the R mark (®) and an asterisk (`*`) to the first mention of the word "Redis" as part of or in connection with a product name; - * ii. Whenever "Redis®`*`" is shown - add the following legend (with an asterisk) in a noticeable and readable format: "`*` Redis is a registered trademark of Redis Ltd. Any rights therein are reserved to Redis Ltd. Any use by `<`company XYZ`>` is for referential purposes only and does not indicate any sponsorship, endorsement or affiliation between Redis and `<`company XYZ`>`"; and - * iii. Sections i. and ii. above apply to any appearance of the word "Redis" in: (a) any web page, gated or un-gated; (b) any marketing collateral, white paper, or other promotional material, whether printed or electronic; and (c) any advertisement, in any format. - * b. Capitalization. Always distinguish the Mark from surrounding text with at least initial capital letters or in all capital letters, (e.g., as Redis or REDIS). - * c. Adjective. Always use the Mark as an adjective modifying a noun, such as “the Redis software.” - * d. 
Do not make any changes to the Logo. This means you may not add decorative elements, change the colors, change the proportions, distort it, add elements or combine it with other logos. -7. **NOTIFY US OF ABUSE.** Do not make any changes to the Logo. This means you may not add decorative elements, change the colors, change the proportions, distort it, add elements or combine it with other logos. -8. **MORE QUESTIONS?** If you have questions about this policy, or wish to request a license for any uses that are not specifically authorized in this policy, please contact us at legal@redis.com. - diff --git a/docs/clients/index.md b/docs/clients/index.md deleted file mode 100644 index e13277cf0b..0000000000 --- a/docs/clients/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "Clients" -linkTitle: "Clients" -weight: 10 -description: Redis clients -layout: bazzar -bazzar: clients -aliases: - - /clients - - /clients/ ---- - diff --git a/docs/connect/_index.md b/docs/connect/_index.md new file mode 100644 index 0000000000..a53d05d406 --- /dev/null +++ b/docs/connect/_index.md @@ -0,0 +1,39 @@ +--- +title: Connect to Redis +linkTitle: Connect +description: Learn how to use user interfaces and client libraries +weight: 35 +aliases: + - /docs/ui +--- + +You can connect to Redis in the following ways: + +* With the `redis-cli` command line tool +* Use RedisInsight as a graphical user interface +* Via a client library for your programming language + +## Redis command line interface + +The [Redis command line interface](/docs/connect/cli) (also known as `redis-cli`) is a terminal program that sends commands to and reads replies from the Redis server. It has the following two main modes: + +1. An interactive Read Eval Print Loop (REPL) mode where the user types Redis commands and receives replies. +2. A command mode where `redis-cli` is executed with additional arguments, and the reply is printed to the standard output. 
+ +## RedisInsight + +[RedisInsight](/docs/connect/insight) combines a graphical user interface with the Redis CLI to let you work with any Redis deployment. You can visually browse and interact with data, take advantage of diagnostic tools, learn by example, and much more. Best of all, RedisInsight is free. + +## Client libraries + +It's easy to connect your application to a Redis database. The official client libraries cover the following languages: + +* [C#/.NET](/docs/connect/clients/dotnet) +* [Go](/docs/connect/clients/go) +* [Java](/docs/connect/clients/java) +* [Node.js](/docs/connect/clients/nodejs) +* [Python](/docs/connect/clients/python) + +You can find a complete list of all client libraries, including the community-maintained ones, on the [clients page](/resources/clients/). + +
diff --git a/docs/manual/cli.md b/docs/connect/cli.md similarity index 97% rename from docs/manual/cli.md rename to docs/connect/cli.md index af1b6d10df..4a9a4b2e97 100644 --- a/docs/manual/cli.md +++ b/docs/connect/cli.md @@ -6,11 +6,11 @@ description: > Overview of redis-cli, the Redis command line interface aliases: - /docs/manual/cli + - /docs/management/cli + - /docs/ui/cli --- -The Redis command line interface (`redis-cli`) is a terminal program used to send commands to and read replies from the Redis server. It has two main modes: an interactive Read Eval Print Loop (REPL) mode where the user types Redis commands and receives replies, and a command mode where `redis-cli` is executed with additional arguments and the reply is printed to the standard output. - -In interactive mode, `redis-cli` has basic line editing capabilities to provide a familiar tyPING experience. +In interactive mode, `redis-cli` has basic line editing capabilities to provide a familiar typing experience. To launch the program in special modes, you can use several options, including: @@ -29,7 +29,7 @@ To run a Redis command and return a standard output at the terminal, include the The reply of the command is "7". Since Redis replies are typed (strings, arrays, integers, nil, errors, etc.), you see the type of the reply between parentheses. This additional information may not be ideal when the output of `redis-cli` must be used as input of another command or redirected into a file. -`redis-cli` only shows additional information for human readibility when it detects the standard output is a tty, or terminal. For all other outputs it will auto-enable the *raw output mode*, as in the following example: +`redis-cli` only shows additional information for human readability when it detects the standard output is a tty, or terminal. 
For all other outputs it will auto-enable the *raw output mode*, as in the following example: $ redis-cli INCR mycounter > /tmp/output.txt $ cat /tmp/output.txt @@ -96,7 +96,7 @@ You can change the port using several command line options. To specify a differe PONG If your instance is password protected, the `-a ` option will -preform authentication saving the need of explicitly using the `AUTH` command: +perform authentication saving the need of explicitly using the `AUTH` command: $ redis-cli -a myUnguessablePazzzzzword123 PING PONG @@ -122,6 +122,11 @@ option and the URI pattern `redis://user:password@host:port/dbnum`: $ redis-cli -u redis://LJenkins:p%40ssw0rd@redis-16379.hosted.com:16379/0 PING PONG +**NOTE:** +User, password and dbnum are optional. +For authentication without a username, use username `default`. +For TLS, use the scheme `rediss`. + ## SSL/TLS By default, `redis-cli` uses a plain TCP connection to connect to Redis. @@ -261,7 +266,7 @@ In interactive mode the user types Redis commands at the prompt. The command is sent to the server, processed, and the reply is parsed back and rendered into a simpler form to read. -Nothing special is needed for running the `redis-cli`in interactive mode - +Nothing special is needed for running the `redis-cli` in interactive mode - just execute it without any arguments $ redis-cli @@ -538,7 +543,7 @@ iteration. Because of the command that it uses this option is called `--scan`. key-446 key-371 -Note that `head -10` is used in order to print only the first lines of the +Note that `head -10` is used in order to print only the first ten lines of the output. Scanning is able to use the underlying pattern matching capability of @@ -603,7 +608,7 @@ once you use the `MONITOR` command. 
All commands received by the active Redis in 1460100081.165665 [0 127.0.0.1:51706] "set" "shipment:8000736522714:status" "sorting" 1460100083.053365 [0 127.0.0.1:51707] "get" "shipment:8000736522714:status" -Note that it is possible to use to pipe the output, so you can monitor +Note that it is possible to pipe the output, so you can monitor for specific patterns using tools such as `grep`. ## Monitoring the latency of Redis instances diff --git a/docs/connect/clients/_index.md b/docs/connect/clients/_index.md new file mode 100644 index 0000000000..d6e8fcff39 --- /dev/null +++ b/docs/connect/clients/_index.md @@ -0,0 +1,28 @@ +--- +title: "Connect with Redis clients" +linkTitle: "Clients" +description: Connect your application to a Redis database and try an example +weight: 45 +aliases: + - /docs/redis-clients + - /docs/stack/get-started/clients/ + - /docs/clients/ +--- + +Here, you will learn how to connect your application to a Redis database. If you're new to Redis, you might first want to [install Redis with Redis Stack and RedisInsight](/docs/getting-started/install-stack/). + +For more Redis topics, see [Using](/docs/manual/) and [Managing](/docs/management/) Redis. + +If you're ready to get started, see the following guides for the official client libraries you can use with Redis. For a complete list of community-driven clients, see [Clients](/resources/clients/). + + +## High-level client libraries + +The Redis OM client libraries let you use the document modeling, indexing, and querying capabilities of Redis Stack much like the way you'd use an [ORM](https://en.wikipedia.org/wiki/Object%E2%80%93relational_mapping). The following Redis OM libraries support Redis Stack: + +* [Redis OM .NET](/docs/clients/om-clients/stack-dotnet/) +* [Redis OM Node](/docs/clients/om-clients/stack-node/) +* [Redis OM Python](/docs/clients/om-clients/stack-python/) +* [Redis OM Spring](/docs/clients/om-clients/stack-spring/) + +
\ No newline at end of file diff --git a/docs/connect/clients/dotnet.md b/docs/connect/clients/dotnet.md new file mode 100644 index 0000000000..c38c7e3f3b --- /dev/null +++ b/docs/connect/clients/dotnet.md @@ -0,0 +1,273 @@ +--- +title: "C#/.NET guide" +linkTitle: "C#/.NET" +description: Connect your .NET application to a Redis database +weight: 1 +aliases: + - /docs/clients/dotnet/ + - /docs/redis-clients/dotnet/ +--- + +Install Redis and the Redis client, then connect your .NET application to a Redis database. + +## NRedisStack + +[NRedisStack](https://github.com/redis/NRedisStack) is a .NET client for Redis. +`NRedisStack` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started](/docs/getting-started/) for Redis installation instructions. + +### Install + +Using the `dotnet` CLI, run: + +``` +dotnet add package NRedisStack +``` + +### Connect + +Connect to localhost on port 6379. + +``` +using NRedisStack; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +//... +ConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost"); +IDatabase db = redis.GetDatabase(); +``` + +Store and retrieve a simple string. + +```csharp +db.StringSet("foo", "bar"); +Console.WriteLine(db.StringGet("foo")); // prints bar +``` + +Store and retrieve a HashMap. 
+ +```csharp +var hash = new HashEntry[] { + new HashEntry("name", "John"), + new HashEntry("surname", "Smith"), + new HashEntry("company", "Redis"), + new HashEntry("age", "29"), + }; +db.HashSet("user-session:123", hash); + +var hashFields = db.HashGetAll("user-session:123"); +Console.WriteLine(String.Join("; ", hashFields)); +// Prints: +// name: John; surname: Smith; company: Redis; age: 29 +``` + +To access Redis Stack capabilities, you should use appropriate interface like this: + +``` +IBloomCommands bf = db.BF(); +ICuckooCommands cf = db.CF(); +ICmsCommands cms = db.CMS(); +IGraphCommands graph = db.GRAPH(); +ITopKCommands topk = db.TOPK(); +ITdigestCommands tdigest = db.TDIGEST(); +ISearchCommands ft = db.FT(); +IJsonCommands json = db.JSON(); +ITimeSeriesCommands ts = db.TS(); +``` + +#### Connect to a Redis cluster + +To connect to a Redis cluster, you just need to specify one or all cluster endpoints in the client configuration: + +```csharp +ConfigurationOptions options = new ConfigurationOptions +{ + //list of available nodes of the cluster along with the endpoint port. + EndPoints = { + { "localhost", 16379 }, + { "localhost", 16380 }, + // ... + }, +}; + +ConnectionMultiplexer cluster = ConnectionMultiplexer.Connect(options); +IDatabase db = cluster.GetDatabase(); + +db.StringSet("foo", "bar"); +Console.WriteLine(db.StringGet("foo")); // prints bar +``` + +#### Connect to your production Redis with TLS + +When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. + +Before connecting your application to the TLS-enabled Redis server, ensure that your certificates and private keys are in the correct format. + +To convert user certificate and private key from the PEM format to `pfx`, use this command: + +```bash +openssl pkcs12 -inkey redis_user_private.key -in redis_user.crt -export -out redis.pfx +``` + +Enter password to protect your `pfx` file. 
+ +Establish a secure connection with your Redis database using this snippet. + +```csharp +ConfigurationOptions options = new ConfigurationOptions +{ + EndPoints = { { "my-redis.cloud.redislabs.com", 6379 } }, + User = "default", // use your Redis user. More info https://redis.io/docs/management/security/acl/ + Password = "secret", // use your Redis password + Ssl = true, + SslProtocols = System.Security.Authentication.SslProtocols.Tls12 +}; + +options.CertificateSelection += delegate +{ + return new X509Certificate2("redis.pfx", "secret"); // use the password you specified for pfx file +}; +options.CertificateValidation += ValidateServerCertificate; + +bool ValidateServerCertificate( + object sender, + X509Certificate? certificate, + X509Chain? chain, + SslPolicyErrors sslPolicyErrors) +{ + if (certificate == null) { + return false; + } + + var ca = new X509Certificate2("redis_ca.pem"); + bool verdict = (certificate.Issuer == ca.Subject); + if (verdict) { + return true; + } + Console.WriteLine("Certificate error: {0}", sslPolicyErrors); + return false; +} + +ConnectionMultiplexer muxer = ConnectionMultiplexer.Connect(options); + +//Creation of the connection to the DB +IDatabase conn = muxer.GetDatabase(); + +//send SET command +conn.StringSet("foo", "bar"); + +//send GET command and print the value +Console.WriteLine(conn.StringGet("foo")); +``` + +### Example: Indexing and querying JSON documents + +This example shows how to convert Redis search results to JSON format using `NRedisStack`. + +Make sure that you have Redis Stack and `NRedisStack` installed. + +Import dependencies and connect to the Redis server: + +```csharp +using NRedisStack; +using NRedisStack.RedisStackCommands; +using NRedisStack.Search; +using NRedisStack.Search.Aggregation; +using NRedisStack.Search.Literals.Enums; +using StackExchange.Redis; + +// ... 
+ +ConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost"); +``` + +Get a reference to the database and for search and JSON commands. + +```csharp +var db = redis.GetDatabase(); +var ft = db.FT(); +var json = db.JSON(); +``` + +Let's create some test data to add to your database. + +```csharp +var user1 = new { + name = "Paul John", + email = "paul.john@example.com", + age = 42, + city = "London" +}; + +var user2 = new { + name = "Eden Zamir", + email = "eden.zamir@example.com", + age = 29, + city = "Tel Aviv" +}; + +var user3 = new { + name = "Paul Zamir", + email = "paul.zamir@example.com", + age = 35, + city = "Tel Aviv" +}; +``` + +Create an index. In this example, all JSON documents with the key prefix `user:` are indexed. For more information, see [Query syntax](/docs/interact/search-and-query/query/). + +```csharp +var schema = new Schema() + .AddTextField(new FieldName("$.name", "name")) + .AddTagField(new FieldName("$.city", "city")) + .AddNumericField(new FieldName("$.age", "age")); + +ft.Create( + "idx:users", + new FTCreateParams().On(IndexDataType.JSON).Prefix("user:"), + schema); +``` + +Use `JSON.SET` to set each user value at the specified path. + +```csharp +json.Set("user:1", "$", user1); +json.Set("user:2", "$", user2); +json.Set("user:3", "$", user3); +``` + +Let's find user `Paul` and filter the results by age. + +```csharp +var res = ft.Search("idx:users", new Query("Paul @age:[30 40]")).Documents.Select(x => x["json"]); +Console.WriteLine(string.Join("\n", res)); +// Prints: {"name":"Paul Zamir","email":"paul.zamir@example.com","age":35,"city":"Tel Aviv"} +``` + +Return only the `city` field. + +```csharp +var res_cities = ft.Search("idx:users", new Query("Paul").ReturnFields(new FieldName("$.city", "city"))).Documents.Select(x => x["city"]); +Console.WriteLine(string.Join(", ", res_cities)); +// Prints: London, Tel Aviv +``` + +Count all users in the same city. 
+ +```csharp +var request = new AggregationRequest("*").GroupBy("@city", Reducers.Count().As("count")); +var result = ft.Aggregate("idx:users", request); + +for (var i=0; i:@localhost:6379/") +if err != nil { + panic(err) +} + +client := redis.NewClient(opt) +``` + +Store and retrieve a simple string. + +```go +ctx := context.Background() + +err := client.Set(ctx, "foo", "bar", 0).Err() +if err != nil { + panic(err) +} + +val, err := client.Get(ctx, "foo").Result() +if err != nil { + panic(err) +} +fmt.Println("foo", val) +``` + +Store and retrieve a map. + +```go +session := map[string]string{"name": "John", "surname": "Smith", "company": "Redis", "age": "29"} +for k, v := range session { + err := client.HSet(ctx, "user-session:123", k, v).Err() + if err != nil { + panic(err) + } +} + +userSession := client.HGetAll(ctx, "user-session:123").Val() +fmt.Println(userSession) + ``` + +#### Connect to a Redis cluster + +To connect to a Redis cluster, use `NewClusterClient`. + +```go +client := redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: []string{":16379", ":16380", ":16381", ":16382", ":16383", ":16384"}, + + // To route commands by latency or randomly, enable one of the following. + //RouteByLatency: true, + //RouteRandomly: true, +}) +``` + +#### Connect to your production Redis with TLS + +When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. + +Establish a secure connection with your Redis database using this snippet. + +```go +// Load client cert +cert, err := tls.LoadX509KeyPair("redis_user.crt", "redis_user_private.key") +if err != nil { + log.Fatal(err) +} + +// Load CA cert +caCert, err := os.ReadFile("redis_ca.pem") +if err != nil { + log.Fatal(err) +} +caCertPool := x509.NewCertPool() +caCertPool.AppendCertsFromPEM(caCert) + +client := redis.NewClient(&redis.Options{ + Addr: "my-redis.cloud.redislabs.com:6379", + Username: "default", // use your Redis user. 
More info https://redis.io/docs/management/security/acl/ + Password: "secret", // use your Redis password + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + }, +}) + +//send SET command +err = client.Set(ctx, "foo", "bar", 0).Err() +if err != nil { + panic(err) +} + +//send GET command and print the value +val, err := client.Get(ctx, "foo").Result() +if err != nil { + panic(err) +} +fmt.Println("foo", val) +``` + + +#### dial tcp: i/o timeout + +You get a `dial tcp: i/o timeout` error when `go-redis` can't connect to the Redis Server, for example, when the server is down or the port is protected by a firewall. To check if Redis Server is listening on the port, run telnet command on the host where the `go-redis` client is running. + +```go +telnet localhost 6379 +Trying 127.0.0.1... +telnet: Unable to connect to remote host: Connection refused +``` + +If you use Docker, Istio, or any other service mesh/sidecar, make sure the app starts after the container is fully available, for example, by configuring healthchecks with Docker and holdApplicationUntilProxyStarts with Istio. +For more information, see [Healthcheck](https://docs.docker.com/engine/reference/run/#healthcheck). + +### Learn more + +* [Documentation](https://redis.uptrace.dev/guide/) +* [GitHub](https://github.com/redis/go-redis) + diff --git a/docs/connect/clients/java/_index.md b/docs/connect/clients/java/_index.md new file mode 100644 index 0000000000..21a90ffe4b --- /dev/null +++ b/docs/connect/clients/java/_index.md @@ -0,0 +1,11 @@ +--- +title: "Connect with Redis Java clients" +linkTitle: "Java" +description: Connect your application to a Redis database using Java and try an example +weight: 3 +--- + +You have two choices of Java clients that you can use with Redis: + +- Jedis, for synchronous applications. +- Lettuce, for asynchronous and reactive applications. 
diff --git a/docs/connect/clients/java/jedis.md b/docs/connect/clients/java/jedis.md new file mode 100644 index 0000000000..7384fa6c77 --- /dev/null +++ b/docs/connect/clients/java/jedis.md @@ -0,0 +1,310 @@ +--- +title: "Jedis guide" +linkTitle: "Jedis" +description: Connect your Java application to a Redis database +weight: 1 +aliases: + - /docs/clients/java/ + - /docs/redis-clients/java/ +--- + +Install Redis and the Redis client, then connect your Java application to a Redis database. + +## Jedis + +[Jedis](https://github.com/redis/jedis) is a Java client for Redis designed for performance and ease of use. + +### Install + +To include `Jedis` as a dependency in your application, edit the dependency file, as follows. + +* If you use **Maven**: + + ```xml + + redis.clients + jedis + 5.1.2 + + ``` + +* If you use **Gradle**: + + ``` + repositories { + mavenCentral() + } + //... + dependencies { + implementation 'redis.clients:jedis:5.1.2' + //... + } + ``` + +* If you use the JAR files, download the latest Jedis and Apache Commons Pool2 JAR files from [Maven Central](https://central.sonatype.com/) or any other Maven repository. + +* Build from [source](https://github.com/redis/jedis) + +### Connect + +For many applications, it's best to use a connection pool. 
You can instantiate and use a `Jedis` connection pool like so: + +```java +package org.example; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPool; + +public class Main { + public static void main(String[] args) { + JedisPool pool = new JedisPool("localhost", 6379); + + try (Jedis jedis = pool.getResource()) { + // Store & Retrieve a simple string + jedis.set("foo", "bar"); + System.out.println(jedis.get("foo")); // prints bar + + // Store & Retrieve a HashMap + Map hash = new HashMap<>();; + hash.put("name", "John"); + hash.put("surname", "Smith"); + hash.put("company", "Redis"); + hash.put("age", "29"); + jedis.hset("user-session:123", hash); + System.out.println(jedis.hgetAll("user-session:123")); + // Prints: {name=John, surname=Smith, company=Redis, age=29} + } + } +} +``` + +Because adding a `try-with-resources` block for each command can be cumbersome, consider using `JedisPooled` as an easier way to pool connections. + +```java +import redis.clients.jedis.JedisPooled; + +//... + +JedisPooled jedis = new JedisPooled("localhost", 6379); +jedis.set("foo", "bar"); +System.out.println(jedis.get("foo")); // prints "bar" +``` + +#### Connect to a Redis cluster + +To connect to a Redis cluster, use `JedisCluster`. + +```java +import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.HostAndPort; + +//... + +Set jedisClusterNodes = new HashSet(); +jedisClusterNodes.add(new HostAndPort("127.0.0.1", 7379)); +jedisClusterNodes.add(new HostAndPort("127.0.0.1", 7380)); +JedisCluster jedis = new JedisCluster(jedisClusterNodes); +``` + +#### Connect to your production Redis with TLS + +When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. + +Before connecting your application to the TLS-enabled Redis server, ensure that your certificates and private keys are in the correct format. 
+ +To convert user certificate and private key from the PEM format to `pkcs12`, use this command: + +``` +openssl pkcs12 -export -in ./redis_user.crt -inkey ./redis_user_private.key -out redis-user-keystore.p12 -name "redis" +``` + +Enter password to protect your `pkcs12` file. + +Convert the server (CA) certificate to the JKS format using the [keytool](https://docs.oracle.com/en/java/javase/12/tools/keytool.html) shipped with JDK. + +``` +keytool -importcert -keystore truststore.jks \ + -storepass REPLACE_WITH_YOUR_PASSWORD \ + -file redis_ca.pem +``` + +Establish a secure connection with your Redis database using this snippet. + +```java +package org.example; + +import redis.clients.jedis.*; + +import javax.net.ssl.*; +import java.io.FileInputStream; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.security.KeyStore; + +public class Main { + + public static void main(String[] args) throws GeneralSecurityException, IOException { + HostAndPort address = new HostAndPort("my-redis-instance.cloud.redislabs.com", 6379); + + SSLSocketFactory sslFactory = createSslSocketFactory( + "./truststore.jks", + "secret!", // use the password you specified for keytool command + "./redis-user-keystore.p12", + "secret!" // use the password you specified for openssl command + ); + + JedisClientConfig config = DefaultJedisClientConfig.builder() + .ssl(true).sslSocketFactory(sslFactory) + .user("default") // use your Redis user. 
More info https://redis.io/docs/management/security/acl/ + .password("secret!") // use your Redis password + .build(); + + JedisPooled jedis = new JedisPooled(address, config); + jedis.set("foo", "bar"); + System.out.println(jedis.get("foo")); // prints bar + } + + private static SSLSocketFactory createSslSocketFactory( + String caCertPath, String caCertPassword, String userCertPath, String userCertPassword) + throws IOException, GeneralSecurityException { + + KeyStore keyStore = KeyStore.getInstance("pkcs12"); + keyStore.load(new FileInputStream(userCertPath), userCertPassword.toCharArray()); + + KeyStore trustStore = KeyStore.getInstance("jks"); + trustStore.load(new FileInputStream(caCertPath), caCertPassword.toCharArray()); + + TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance("X509"); + trustManagerFactory.init(trustStore); + + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("PKIX"); + keyManagerFactory.init(keyStore, userCertPassword.toCharArray()); + + SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), null); + + return sslContext.getSocketFactory(); + } +} +``` + +### Production usage + +### Configuring Connection pool +As mentioned in the previous section, use `JedisPool` or `JedisPooled` to create a connection pool. +`JedisPooled`, added in Jedis version 4.0.0, provides capabilities similar to `JedisPool` but with a more straightforward API. +A connection pool holds a specified number of connections, creates more connections when necessary, and terminates them when they are no longer needed. + +Here is a simplified connection lifecycle in a pool: + +1. A connection is requested from the pool. +2. A connection is served: + - An idle connection is served when non-active connections are available, or + - A new connection is created when the number of connections is under `maxTotal`. +3. The connection becomes active. 
+4. The connection is released back to the pool. +5. The connection is marked as stale. +6. The connection is kept idle for `minEvictableIdleTime`. +7. The connection becomes evictable if the number of connections is greater than `minIdle`. +8. The connection is ready to be closed. + +It's important to configure the connection pool correctly. +Use `GenericObjectPoolConfig` from [Apache Commons Pool2](https://commons.apache.org/proper/commons-pool/apidocs/org/apache/commons/pool2/impl/GenericObjectPoolConfig.html). + +```java +ConnectionPoolConfig poolConfig = new ConnectionPoolConfig(); +// maximum active connections in the pool, +// tune this according to your needs and application type +// default is 8 +poolConfig.setMaxTotal(8); + +// maximum idle connections in the pool, default is 8 +poolConfig.setMaxIdle(8); +// minimum idle connections in the pool, default 0 +poolConfig.setMinIdle(0); + +// Enables waiting for a connection to become available. +poolConfig.setBlockWhenExhausted(true); +// The maximum number of seconds to wait for a connection to become available +poolConfig.setMaxWait(Duration.ofSeconds(1)); + +// Enables sending a PING command periodically while the connection is idle. 
+poolConfig.setTestWhileIdle(true); +// controls the period between checks for idle connections in the pool +poolConfig.setTimeBetweenEvictionRuns(Duration.ofSeconds(1)); + +// JedisPooled does all hard work on fetching and releasing connection to the pool +// to prevent connection starvation +JedisPooled jedis = new JedisPooled(poolConfig, "localhost", 6379); +``` + +### Timeout + +To set a timeout for a connection, use the `JedisPooled` or `JedisPool` constructor with the `timeout` parameter, or use `JedisClientConfig` with the `socketTimeout` and `connectionTimeout` parameters: + +```java +HostAndPort hostAndPort = new HostAndPort("localhost", 6379); + +JedisPooled jedisWithTimeout = new JedisPooled(hostAndPort, + DefaultJedisClientConfig.builder() + .socketTimeoutMillis(5000) // set timeout to 5 seconds + .connectionTimeoutMillis(5000) // set connection timeout to 5 seconds + .build(), + poolConfig +); +``` + +### Exception handling +The Jedis exception hierarchy is rooted at `JedisException`, which extends `RuntimeException`; all Jedis exceptions are therefore unchecked. + +``` +JedisException +├── JedisDataException +│ ├── JedisRedirectionException +│ │ ├── JedisMovedDataException +│ │ └── JedisAskDataException +│ ├── AbortedTransactionException +│ ├── JedisAccessControlException +│ └── JedisNoScriptException +├── JedisClusterException +│ ├── JedisClusterOperationException +│ ├── JedisConnectionException +│ └── JedisValidationException +└── InvalidURIException +``` + +#### General Exceptions +In general, Jedis can throw the following exceptions while executing commands: + +- `JedisConnectionException` - when the connection to Redis is lost or closed unexpectedly. Configure failover to handle this exception automatically with Resilience4J and the built-in Jedis failover mechanism. +- `JedisAccessControlException` - when the user does not have the permission to execute the command or the user ID and/or password are incorrect. 
+- `JedisDataException` - when there is a problem with the data being sent to or received from the Redis server. Usually, the error message will contain more information about the failed command. +- `JedisException` - this exception is a catch-all exception that can be thrown for any other unexpected errors. + +Conditions when `JedisException` can be thrown: +- Bad return from a health check with the `PING` command +- Failure during SHUTDOWN +- Pub/Sub failure when issuing commands (disconnect) +- Any unknown server messages +- Sentinel: can connect to sentinel but master is not monitored or all Sentinels are down. +- MULTI or DISCARD command failed +- Shard commands key hash check failed or no Reachable Shards +- Retry deadline exceeded/number of attempts (Retry Command Executor) +- POOL - pool exhausted, error adding idle objects, returning broken resources to the pool + +All the Jedis exceptions are runtime exceptions and in most cases irrecoverable, so in general bubble up to the API capturing the error message. + +## DNS cache and Redis + +When you connect to a Redis with multiple endpoints, such as [Redis Enterprise Active-Active](https://redis.com/redis-enterprise/technology/active-active-geo-distribution/), it's recommended to disable the JVM's DNS cache to load-balance requests across multiple endpoints. 
+ +You can do this in your application's code with the following snippet: +```java +java.security.Security.setProperty("networkaddress.cache.ttl","0"); +java.security.Security.setProperty("networkaddress.cache.negative.ttl", "0"); +``` + +### Learn more + +* [Jedis API reference](https://www.javadoc.io/doc/redis.clients/jedis/latest/index.html) +* [Failover with Jedis](https://github.com/redis/jedis/blob/master/docs/failover.md) +* [GitHub](https://github.com/redis/jedis) diff --git a/docs/connect/clients/java/lettuce.md b/docs/connect/clients/java/lettuce.md new file mode 100644 index 0000000000..47b182c5c6 --- /dev/null +++ b/docs/connect/clients/java/lettuce.md @@ -0,0 +1,246 @@ +--- +title: "Lettuce guide" +linkTitle: "Lettuce" +description: Connect your Lettuce application to a Redis database +weight: 2 +--- + +Install Redis and the Redis client, then connect your Lettuce application to a Redis database. + +## Lettuce + +Lettuce offers a powerful and efficient way to interact with Redis through its asynchronous and reactive APIs. By leveraging these capabilities, you can build high-performance, scalable Java applications that make optimal use of Redis's capabilities. + +## Install + +To include Lettuce as a dependency in your application, edit the appropriate dependency file as shown below. + +If you use Maven, add the following dependency to your `pom.xml`: + +```xml + + io.lettuce + lettuce-core + 6.3.2.RELEASE + +``` + +If you use Gradle, include this line in your `build.gradle` file: + +``` +dependencies { + compile 'io.lettuce:lettuce-core:6.3.2.RELEASE' +} +``` + +If you wish to use the JAR files directly, download the latest Lettuce and, optionally, Apache Commons Pool2 JAR files from Maven Central or any other Maven repository. + +To build from source, see the instructions on the [Lettuce source code GitHub repo](https://github.com/lettuce-io/lettuce-core). 
+ +## Connect + +Start by creating a connection to your Redis server. There are many ways to achieve this using Lettuce. Here are a few. + +### Asynchronous connection + +```java +package org.example; +import java.util.*; +import java.util.concurrent.ExecutionException; + +import io.lettuce.core.*; +import io.lettuce.core.api.async.RedisAsyncCommands; +import io.lettuce.core.api.StatefulRedisConnection; + +public class Async { + public static void main(String[] args) { + RedisClient redisClient = RedisClient.create("redis://localhost:6379"); + + try (StatefulRedisConnection connection = redisClient.connect()) { + RedisAsyncCommands asyncCommands = connection.async(); + + // Asynchronously store & retrieve a simple string + asyncCommands.set("foo", "bar").get(); + System.out.println(asyncCommands.get("foo").get()); // prints bar + + // Asynchronously store key-value pairs in a hash directly + Map hash = new HashMap<>(); + hash.put("name", "John"); + hash.put("surname", "Smith"); + hash.put("company", "Redis"); + hash.put("age", "29"); + asyncCommands.hset("user-session:123", hash).get(); + + System.out.println(asyncCommands.hgetall("user-session:123").get()); + // Prints: {name=John, surname=Smith, company=Redis, age=29} + } catch (ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } finally { + redisClient.shutdown(); + } + } +} +``` + +Learn more about asynchronous Lettuce API in [the reference guide](https://lettuce.io/core/release/reference/index.html#asynchronous-api). 
+ +### Reactive connection + +```java +package org.example; +import java.util.*; +import io.lettuce.core.*; +import io.lettuce.core.api.reactive.RedisReactiveCommands; +import io.lettuce.core.api.StatefulRedisConnection; + +public class Main { + public static void main(String[] args) { + RedisClient redisClient = RedisClient.create("redis://localhost:6379"); + + try (StatefulRedisConnection connection = redisClient.connect()) { + RedisReactiveCommands reactiveCommands = connection.reactive(); + + // Reactively store & retrieve a simple string + reactiveCommands.set("foo", "bar").block(); + reactiveCommands.get("foo").doOnNext(System.out::println).block(); // prints bar + + // Reactively store key-value pairs in a hash directly + Map hash = new HashMap<>(); + hash.put("name", "John"); + hash.put("surname", "Smith"); + hash.put("company", "Redis"); + hash.put("age", "29"); + + reactiveCommands.hset("user-session:124", hash).then( + reactiveCommands.hgetall("user-session:124") + .collectMap(KeyValue::getKey, KeyValue::getValue).doOnNext(System.out::println)) + .block(); + // Prints: {surname=Smith, name=John, company=Redis, age=29} + + } finally { + redisClient.shutdown(); + } + } +} +``` + +Learn more about reactive Lettuce API in [the reference guide](https://lettuce.io/core/release/reference/index.html#reactive-api). + +### Redis Cluster connection + +```java +import io.lettuce.core.RedisURI; +import io.lettuce.core.cluster.RedisClusterClient; +import io.lettuce.core.cluster.api.StatefulRedisClusterConnection; +import io.lettuce.core.cluster.api.async.RedisAdvancedClusterAsyncCommands; + +// ... + +RedisURI redisUri = RedisURI.Builder.redis("localhost").withPassword("authentication").build(); + +RedisClusterClient clusterClient = RedisClusterClient.create(redisUri); +StatefulRedisClusterConnection connection = clusterClient.connect(); +RedisAdvancedClusterAsyncCommands commands = connection.async(); + +// ... 
+ +connection.close(); +clusterClient.shutdown(); +``` + +### TLS connection + +When you deploy your application, use TLS and follow the [Redis security guidelines](/docs/management/security/). + +```java +RedisURI redisUri = RedisURI.Builder.redis("localhost") + .withSsl(true) + .withPassword("secret!") // use your Redis password + .build(); + +RedisClient client = RedisClient.create(redisUri); +``` + + + +## Connection Management in Lettuce + +Lettuce uses `ClientResources` for efficient management of shared resources like event loop groups and thread pools. +For connection pooling, Lettuce leverages `RedisClient` or `RedisClusterClient`, which can handle multiple concurrent connections efficiently. + +A typical approach with Lettuce is to create a single `RedisClient` instance and reuse it to establish connections to your Redis server(s). +These connections are multiplexed; that is, multiple commands can be run concurrently over a single or a small set of connections, making explicit pooling less critical. + +Lettuce provides pool config to be used with Lettuce asynchronous connection methods. 
+
+
+```java
+package org.example;
+import io.lettuce.core.RedisClient;
+import io.lettuce.core.RedisURI;
+import io.lettuce.core.TransactionResult;
+import io.lettuce.core.api.StatefulRedisConnection;
+import io.lettuce.core.api.async.RedisAsyncCommands;
+import io.lettuce.core.codec.StringCodec;
+import io.lettuce.core.support.*;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionStage;
+
+public class Pool {
+    public static void main(String[] args) {
+        RedisClient client = RedisClient.create();
+
+        String host = "localhost";
+        int port = 6379;
+
+        CompletionStage>> poolFuture
+                = AsyncConnectionPoolSupport.createBoundedObjectPoolAsync(
+                () -> client.connectAsync(StringCodec.UTF8, RedisURI.create(host, port)),
+                BoundedPoolConfig.create());
+
+        // await poolFuture initialization to avoid NoSuchElementException: Pool exhausted when starting your application
+        AsyncPool> pool = poolFuture.toCompletableFuture()
+                .join();
+
+        // execute work
+        CompletableFuture transactionResult = pool.acquire()
+                .thenCompose(connection -> {
+
+                    RedisAsyncCommands async = connection.async();
+
+                    async.multi();
+                    async.set("key", "value");
+                    async.set("key2", "value2");
+                    System.out.println("Executed commands in pipeline");
+                    return async.exec().whenComplete((s, throwable) -> pool.release(connection));
+                });
+        transactionResult.join();
+
+        // terminating
+        pool.closeAsync();
+
+        // after pool completion
+        client.shutdownAsync();
+    }
+}
+```
+
+In this setup, `AsyncConnectionPoolSupport.createBoundedObjectPoolAsync` creates a bounded asynchronous pool that manages the lifecycle of the pooled `StatefulRedisConnection` objects for you; be sure to release each connection back to the pool (as shown with `pool.release(connection)`) once you're done with it.
+
+## DNS cache and Redis
+
+When you connect to a Redis database with multiple endpoints, such as Redis Enterprise Active-Active, it's recommended to disable the JVM's DNS cache to load-balance requests across multiple endpoints. 
+ +You can do this in your application's code with the following snippet: + +```java +java.security.Security.setProperty("networkaddress.cache.ttl","0"); +java.security.Security.setProperty("networkaddress.cache.negative.ttl", "0"); +``` + +## Learn more + +- [Lettuce reference documentation](https://lettuce.io/docs/) +- [Redis commands](https://redis.io/commands) +- [Project Reactor](https://projectreactor.io/) \ No newline at end of file diff --git a/docs/connect/clients/nodejs.md b/docs/connect/clients/nodejs.md new file mode 100644 index 0000000000..4c7aa13c0f --- /dev/null +++ b/docs/connect/clients/nodejs.md @@ -0,0 +1,214 @@ +--- +title: "Node.js guide" +linkTitle: "Node.js" +description: Connect your Node.js application to a Redis database +weight: 4 +aliases: + - /docs/clients/nodejs/ + - /docs/redis-clients/nodejs/ +--- + +Install Redis and the Redis client, then connect your Node.js application to a Redis database. + +## node-redis + +[node-redis](https://github.com/redis/node-redis) is a modern, high-performance Redis client for Node.js. +`node-redis` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started](/docs/getting-started/) for Redis installation instructions. + +### Install + +To install node-redis, run: + +``` +npm install redis +``` + +### Connect + +Connect to localhost on port 6379. + +```js +import { createClient } from 'redis'; + +const client = createClient(); + +client.on('error', err => console.log('Redis Client Error', err)); + +await client.connect(); +``` + +Store and retrieve a simple string. + +```js +await client.set('key', 'value'); +const value = await client.get('key'); +``` + +Store and retrieve a map. 
+ +```js +await client.hSet('user-session:123', { + name: 'John', + surname: 'Smith', + company: 'Redis', + age: 29 +}) + +let userSession = await client.hGetAll('user-session:123'); +console.log(JSON.stringify(userSession, null, 2)); +/* +{ + "surname": "Smith", + "name": "John", + "company": "Redis", + "age": "29" +} + */ +``` + +To connect to a different host or port, use a connection string in the format `redis[s]://[[username][:password]@][host][:port][/db-number]`: + +```js +createClient({ + url: 'redis://alice:foobared@awesome.redis.server:6380' +}); +``` +To check if the client is connected and ready to send commands, use `client.isReady`, which returns a Boolean. `client.isOpen` is also available. This returns `true` when the client's underlying socket is open, and `false` when it isn't (for example, when the client is still connecting or reconnecting after a network error). + +#### Connect to a Redis cluster + +To connect to a Redis cluster, use `createCluster`. + +```js +import { createCluster } from 'redis'; + +const cluster = createCluster({ + rootNodes: [ + { + url: 'redis://127.0.0.1:16379' + }, + { + url: 'redis://127.0.0.1:16380' + }, + // ... + ] +}); + +cluster.on('error', (err) => console.log('Redis Cluster Error', err)); + +await cluster.connect(); + +await cluster.set('foo', 'bar'); +const value = await cluster.get('foo'); +console.log(value); // returns 'bar' + +await cluster.quit(); +``` + +#### Connect to your production Redis with TLS + +When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. + +```js +const client = createClient({ + username: 'default', // use your Redis user. 
More info https://redis.io/docs/management/security/acl/ + password: 'secret', // use your password here + socket: { + host: 'my-redis.cloud.redislabs.com', + port: 6379, + tls: true, + key: readFileSync('./redis_user_private.key'), + cert: readFileSync('./redis_user.crt'), + ca: [readFileSync('./redis_ca.pem')] + } +}); + +client.on('error', (err) => console.log('Redis Client Error', err)); + +await client.connect(); + +await client.set('foo', 'bar'); +const value = await client.get('foo'); +console.log(value) // returns 'bar' + +await client.disconnect(); +``` + +You can also use discrete parameters and UNIX sockets. Details can be found in the [client configuration guide](https://github.com/redis/node-redis/blob/master/docs/client-configuration.md). + +### Production usage + +#### Handling errors +Node-Redis provides [multiple events to handle various scenarios](https://github.com/redis/node-redis?tab=readme-ov-file#events), among which the most critical is the `error` event. + +This event is triggered whenever an error occurs within the client. + +It is crucial to listen for error events. + + +If a client does not register at least one error listener and an error occurs, the system will throw that error, potentially causing the Node.js process to exit unexpectedly. +See [the EventEmitter docs](https://nodejs.org/api/events.html#events_error_events) for more details. + +```typescript +const client = createClient({ + // ... client options +}); +// Always ensure there's a listener for errors in the client to prevent process crashes due to unhandled errors +client.on('error', error => { + console.error(`Redis client error:`, error); +}); +``` + + +#### Handling reconnections + +If network issues or other problems unexpectedly close the socket, the client will reject all commands already sent, since the server might have already executed them. 
+The rest of the pending commands will remain queued in memory until a new socket is established. +This behaviour is controlled by the `enableOfflineQueue` option, which is enabled by default. + +The client uses `reconnectStrategy` to decide when to attempt to reconnect. +The default strategy is to calculate the delay before each attempt based on the attempt number `Math.min(retries * 50, 500)`. You can customize this strategy by passing a supported value to `reconnectStrategy` option: + + +1. Define a callback `(retries: number, cause: Error) => false | number | Error` **(recommended)** +```typescript +const client = createClient({ + socket: { + reconnectStrategy: function(retries) { + if (retries > 20) { + console.log("Too many attempts to reconnect. Redis connection was terminated"); + return new Error("Too many retries."); + } else { + return retries * 500; + } + } + } +}); +client.on('error', error => console.error('Redis client error:', error)); +``` +In the provided reconnection strategy callback, the client attempts to reconnect up to 20 times with a delay of `retries * 500` milliseconds between attempts. +After approximately two minutes, the client logs an error message and terminates the connection if the maximum retry limit is exceeded. + + +2. Use a numerical value to set a fixed delay in milliseconds. +3. Use `false` to disable reconnection attempts. This option should only be used for testing purposes. 
+ +#### Timeout + +To set a timeout for a connection, use the `connectTimeout` option: +```typescript +const client = createClient({ + // setting a 10-second timeout + connectTimeout: 10000 // in milliseconds +}); +client.on('error', error => console.error('Redis client error:', error)); +``` + +### Learn more + +* [Node-Redis Configuration Options](https://github.com/redis/node-redis/blob/master/docs/client-configuration.md) +* [Redis commands](https://redis.js.org/#node-redis-usage-redis-commands) +* [Programmability](https://redis.js.org/#node-redis-usage-programmability) +* [Clustering](https://redis.js.org/#node-redis-usage-clustering) +* [GitHub](https://github.com/redis/node-redis) + diff --git a/docs/connect/clients/python.md b/docs/connect/clients/python.md new file mode 100644 index 0000000000..7c0e5e87aa --- /dev/null +++ b/docs/connect/clients/python.md @@ -0,0 +1,218 @@ +--- +title: "Python guide" +linkTitle: "Python" +description: Connect your Python application to a Redis database +weight: 5 +aliases: + - /docs/clients/python/ + - /docs/redis-clients/python/ +--- + +Install Redis and the Redis client, then connect your Python application to a Redis database. + +## redis-py + +Get started with the [redis-py](https://github.com/redis/redis-py) client for Redis. + +`redis-py` requires a running Redis or [Redis Stack](/docs/getting-started/install-stack/) server. See [Getting started](/docs/getting-started/) for Redis installation instructions. + +### Install + +To install `redis-py`, enter: + +```bash +pip install redis +``` + +For faster performance, install Redis with [`hiredis`](https://github.com/redis/hiredis) support. This provides a compiled response parser, and for most cases requires zero code changes. By default, if `hiredis` >= 1.0 is available, `redis-py` attempts to use it for response parsing. 
+ +{{% alert title="Note" %}} +The Python `distutils` packaging scheme is no longer part of Python 3.12 and greater. If you're having difficulties getting `redis-py` installed in a Python 3.12 environment, consider updating to a recent release of `redis-py`. +{{% /alert %}} + +```bash +pip install redis[hiredis] +``` + +### Connect + +Connect to localhost on port 6379, set a value in Redis, and retrieve it. All responses are returned as bytes in Python. To receive decoded strings, set `decode_responses=True`. For more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html). + +```python +r = redis.Redis(host='localhost', port=6379, decode_responses=True) +``` + +Store and retrieve a simple string. + +```python +r.set('foo', 'bar') +# True +r.get('foo') +# bar +``` + +Store and retrieve a dict. + +```python +r.hset('user-session:123', mapping={ + 'name': 'John', + "surname": 'Smith', + "company": 'Redis', + "age": 29 +}) +# True + +r.hgetall('user-session:123') +# {'surname': 'Smith', 'name': 'John', 'company': 'Redis', 'age': '29'} +``` + +#### Connect to a Redis cluster + +To connect to a Redis cluster, use `RedisCluster`. + +```python +from redis.cluster import RedisCluster + +rc = RedisCluster(host='localhost', port=16379) + +print(rc.get_nodes()) +# [[host=127.0.0.1,port=16379,name=127.0.0.1:16379,server_type=primary,redis_connection=Redis>>], ... + +rc.set('foo', 'bar') +# True + +rc.get('foo') +# b'bar' +``` +For more information, see [redis-py Clustering](https://redis-py.readthedocs.io/en/stable/clustering.html). + +#### Connect to your production Redis with TLS + +When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. + +```python +import redis + +r = redis.Redis( + host="my-redis.cloud.redislabs.com", port=6379, + username="default", # use your Redis user. 
More info https://redis.io/docs/management/security/acl/ + password="secret", # use your Redis password + ssl=True, + ssl_certfile="./redis_user.crt", + ssl_keyfile="./redis_user_private.key", + ssl_ca_certs="./redis_ca.pem", +) +r.set('foo', 'bar') +# True + +r.get('foo') +# b'bar' +``` +For more information, see [redis-py TLS examples](https://redis-py.readthedocs.io/en/stable/examples/ssl_connection_examples.html). + +### Example: Indexing and querying JSON documents + +Make sure that you have Redis Stack and `redis-py` installed. Import dependencies: + +```python +import redis +from redis.commands.json.path import Path +import redis.commands.search.aggregation as aggregations +import redis.commands.search.reducers as reducers +from redis.commands.search.field import TextField, NumericField, TagField +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +from redis.commands.search.query import NumericFilter, Query +``` + +Connect to your Redis database. + +```python +r = redis.Redis(host='localhost', port=6379) +``` + +Let's create some test data to add to your database. + +```python +user1 = { + "name": "Paul John", + "email": "paul.john@example.com", + "age": 42, + "city": "London" +} +user2 = { + "name": "Eden Zamir", + "email": "eden.zamir@example.com", + "age": 29, + "city": "Tel Aviv" +} +user3 = { + "name": "Paul Zamir", + "email": "paul.zamir@example.com", + "age": 35, + "city": "Tel Aviv" +} +``` + +Define indexed fields and their data types using `schema`. Use JSON path expressions to map specific JSON elements to the schema fields. + +```python +schema = ( + TextField("$.name", as_name="name"), + TagField("$.city", as_name="city"), + NumericField("$.age", as_name="age") +) +``` + +Create an index. In this example, all JSON documents with the key prefix `user:` will be indexed. For more information, see [Query syntax](/docs/interact/search-and-query/query/). 
+ +```python +rs = r.ft("idx:users") +rs.create_index( + schema, + definition=IndexDefinition( + prefix=["user:"], index_type=IndexType.JSON + ) +) +# b'OK' +``` + +Use `JSON.SET` to set each user value at the specified path. + +```python +r.json().set("user:1", Path.root_path(), user1) +r.json().set("user:2", Path.root_path(), user2) +r.json().set("user:3", Path.root_path(), user3) +``` + +Let's find user `Paul` and filter the results by age. + +```python +res = rs.search( + Query("Paul @age:[30 40]") +) +# Result{1 total, docs: [Document {'id': 'user:3', 'payload': None, 'json': '{"name":"Paul Zamir","email":"paul.zamir@example.com","age":35,"city":"Tel Aviv"}'}]} +``` + +Query using JSON Path expressions. + +```python +rs.search( + Query("Paul").return_field("$.city", as_field="city") +).docs +# [Document {'id': 'user:1', 'payload': None, 'city': 'London'}, Document {'id': 'user:3', 'payload': None, 'city': 'Tel Aviv'}] +``` + +Aggregate your results using `FT.AGGREGATE`. + +```python +req = aggregations.AggregateRequest("*").group_by('@city', reducers.count().alias('count')) +print(rs.aggregate(req).rows) +# [[b'city', b'Tel Aviv', b'count', b'2'], [b'city', b'London', b'count', b'1']] +``` + +### Learn more + +* [Command reference](https://redis-py.readthedocs.io/en/stable/commands.html) +* [Tutorials](https://redis.readthedocs.io/en/stable/examples.html) +* [GitHub](https://github.com/redis/redis-py) + diff --git a/docs/data-types/_index.md b/docs/data-types/_index.md new file mode 100644 index 0000000000..ca95521d16 --- /dev/null +++ b/docs/data-types/_index.md @@ -0,0 +1,112 @@ +--- +title: "Understand Redis data types" +linkTitle: "Understand data types" +description: Overview of data types supported by Redis +weight: 35 +aliases: + - /docs/manual/data-types + - /topics/data-types + - /docs/data-types/tutorial +--- + +Redis is a data structure server. 
+At its core, Redis provides a collection of native data types that help you solve a wide variety of problems, from [caching](/docs/manual/client-side-caching/) to [queuing](/docs/data-types/lists/) to [event processing](/docs/data-types/streams/). +Below is a short description of each data type, with links to broader overviews and command references. + +If you'd like to try a comprehensive tutorial for each data structure, see their overview pages below. + + +## Core + +### Strings + +[Redis strings](/docs/data-types/strings) are the most basic Redis data type, representing a sequence of bytes. +For more information, see: + +* [Overview of Redis strings](/docs/data-types/strings/) +* [Redis string command reference](/commands/?group=string) + +### Lists + +[Redis lists](/docs/data-types/lists) are lists of strings sorted by insertion order. +For more information, see: + +* [Overview of Redis lists](/docs/data-types/lists/) +* [Redis list command reference](/commands/?group=list) + +### Sets + +[Redis sets](/docs/data-types/sets) are unordered collections of unique strings that act like the sets from your favorite programming language (for example, [Java HashSets](https://docs.oracle.com/javase/7/docs/api/java/util/HashSet.html), [Python sets](https://docs.python.org/3.10/library/stdtypes.html#set-types-set-frozenset), and so on). +With a Redis set, you can add, remove, and test for existence in O(1) time (in other words, regardless of the number of set elements). +For more information, see: + +* [Overview of Redis sets](/docs/data-types/sets/) +* [Redis set command reference](/commands/?group=set) + +### Hashes + +[Redis hashes](/docs/data-types/hashes) are record types modeled as collections of field-value pairs. 
+As such, Redis hashes resemble [Python dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries), [Java HashMaps](https://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html), and [Ruby hashes](https://ruby-doc.org/core-3.1.2/Hash.html). +For more information, see: + +* [Overview of Redis hashes](/docs/data-types/hashes/) +* [Redis hashes command reference](/commands/?group=hash) + +### Sorted sets + +[Redis sorted sets](/docs/data-types/sorted-sets) are collections of unique strings that maintain order by each string's associated score. +For more information, see: + +* [Overview of Redis sorted sets](/docs/data-types/sorted-sets) +* [Redis sorted set command reference](/commands/?group=sorted-set) + +### Streams + +A [Redis stream](/docs/data-types/streams) is a data structure that acts like an append-only log. +Streams help record events in the order they occur and then syndicate them for processing. +For more information, see: + +* [Overview of Redis Streams](/docs/data-types/streams) +* [Redis Streams command reference](/commands/?group=stream) + +### Geospatial indexes + +[Redis geospatial indexes](/docs/data-types/geospatial) are useful for finding locations within a given geographic radius or bounding box. +For more information, see: + +* [Overview of Redis geospatial indexes](/docs/data-types/geospatial/) +* [Redis geospatial indexes command reference](/commands/?group=geo) + +### Bitmaps + +[Redis bitmaps](/docs/data-types/bitmaps/) let you perform bitwise operations on strings. +For more information, see: + +* [Overview of Redis bitmaps](/docs/data-types/bitmaps/) +* [Redis bitmap command reference](/commands/?group=bitmap) + +### Bitfields + +[Redis bitfields](/docs/data-types/bitfields/) efficiently encode multiple counters in a string value. +Bitfields provide atomic get, set, and increment operations and support different overflow policies. 
+For more information, see: + +* [Overview of Redis bitfields](/docs/data-types/bitfields/) +* The `BITFIELD` command. + +### HyperLogLog + +The [Redis HyperLogLog](/docs/data-types/hyperloglogs) data structures provide probabilistic estimates of the cardinality (i.e., number of elements) of large sets. For more information, see: + +* [Overview of Redis HyperLogLog](/docs/data-types/hyperloglogs) +* [Redis HyperLogLog command reference](/commands/?group=hyperloglog) + +## Extensions + +To extend the features provided by the included data types, use one of these options: + +1. Write your own custom [server-side functions in Lua](/docs/manual/programmability/). +1. Write your own Redis module using the [modules API](/docs/reference/modules/) or check out the [community-supported modules](/docs/modules/). +1. Use [JSON](/docs/stack/json/), [querying](/docs/stack/search/), [time series](/docs/stack/timeseries/), and other capabilities provided by [Redis Stack](/docs/stack/). + +
 diff --git a/docs/data-types/bitfields.md b/docs/data-types/bitfields.md new file mode 100644 index 0000000000..0f693c2277 --- /dev/null +++ b/docs/data-types/bitfields.md @@ -0,0 +1,47 @@ +--- +title: "Redis bitfields" +linkTitle: "Bitfields" +weight: 130 +description: > + Introduction to Redis bitfields +--- + +Redis bitfields let you set, increment, and get integer values of arbitrary bit length. +For example, you can operate on anything from unsigned 1-bit integers to signed 63-bit integers. + +These values are stored using binary-encoded Redis strings. +Bitfields support atomic read, write and increment operations, making them a good choice for managing counters and similar numerical values. + + +## Basic commands + +* `BITFIELD` atomically sets, increments and reads one or more values. +* `BITFIELD_RO` is a read-only variant of `BITFIELD`. + + +## Example + +Suppose you want to maintain two metrics for various bicycles: the current price and the number of owners over time. You can represent these counters with a 32-bit wide bitfield for each bike. + +* Bike 1 initially costs 1,000 (counter in offset 0) and has never had an owner. After being sold, it's now considered used and the price instantly drops to reflect its new condition, and it now has an owner (offset 1). After quite some time, the bike becomes a classic. The original owner sells it for a profit, so the price goes up and the number of owners does as well. Finally, you can look at the bike's current price and number of owners. 
+ +{{< clients-example bitfield_tutorial bf >}} +> BITFIELD bike:1:stats SET u32 #0 1000 +1) (integer) 0 +> BITFIELD bike:1:stats INCRBY u32 #0 -50 INCRBY u32 #1 1 +1) (integer) 950 +2) (integer) 1 +> BITFIELD bike:1:stats INCRBY u32 #0 500 INCRBY u32 #1 1 +1) (integer) 1450 +2) (integer) 2 +> BITFIELD bike:1:stats GET u32 #0 GET u32 #1 +1) (integer) 1450 +2) (integer) 2 +{{< /clients-example >}} + + +## Performance + +`BITFIELD` is O(n), where _n_ is the number of counters accessed. diff --git a/docs/data-types/bitmaps.md b/docs/data-types/bitmaps.md new file mode 100644 index 0000000000..d71ea742f1 --- /dev/null +++ b/docs/data-types/bitmaps.md @@ -0,0 +1,113 @@ +--- +title: "Redis bitmaps" +linkTitle: "Bitmaps" +weight: 120 +description: > + Introduction to Redis bitmaps +--- + +Bitmaps are not an actual data type, but a set of bit-oriented operations +defined on the String type which is treated like a bit vector. +Since strings are binary safe blobs and their maximum length is 512 MB, +they are suitable to set up to 2^32 different bits. + +You can perform bitwise operations on one or more strings. +Some examples of bitmap use cases include: + +* Efficient set representations for cases where the members of a set correspond to the integers 0-N. +* Object permissions, where each bit represents a particular permission, similar to the way that file systems store permissions. + +## Basic commands + +* `SETBIT` sets a bit at the provided offset to 0 or 1. +* `GETBIT` returns the value of a bit at a given offset. + +See the [complete list of bitmap commands](https://redis.io/commands/?group=bitmap). + + +## Example + +Suppose you have 1000 cyclists racing through the country-side, with sensors on their bikes labeled 0-999. +You want to quickly determine whether a given sensor has pinged a tracking server within the hour to check in on a rider. + +You can represent this scenario using a bitmap whose key references the current hour. 
+ +* Rider 123 pings the server on January 1, 2024 within the 00:00 hour. You can then confirm that rider 123 pinged the server. You can also check to see if rider 456 has pinged the server for that same hour. + +{{< clients-example bitmap_tutorial ping >}} +> SETBIT pings:2024-01-01-00:00 123 1 +(integer) 0 +> GETBIT pings:2024-01-01-00:00 123 +1 +> GETBIT pings:2024-01-01-00:00 456 +0 +{{< /clients-example >}} + + +## Bit Operations + +Bit operations are divided into two groups: constant-time single bit +operations, like setting a bit to 1 or 0, or getting its value, and +operations on groups of bits, for example counting the number of set +bits in a given range of bits (e.g., population counting). + +One of the biggest advantages of bitmaps is that they often provide +extreme space savings when storing information. For example in a system +where different users are represented by incremental user IDs, it is possible +to remember a single bit information (for example, knowing whether +a user wants to receive a newsletter) of 4 billion users using just 512 MB of memory. + +The `SETBIT` command takes as its first argument the bit number, and as its second +argument the value to set the bit to, which is 1 or 0. The command +automatically enlarges the string if the addressed bit is outside the +current string length. + +`GETBIT` just returns the value of the bit at the specified index. +Out of range bits (addressing a bit that is outside the length of the string +stored into the target key) are always considered to be zero. + +There are three commands operating on group of bits: + +1. `BITOP` performs bit-wise operations between different strings. The provided operations are AND, OR, XOR and NOT. +2. `BITCOUNT` performs population counting, reporting the number of bits set to 1. +3. `BITPOS` finds the first bit having the specified value of 0 or 1. 
+ +Both `BITPOS` and `BITCOUNT` are able to operate with byte ranges of the +string, instead of running for the whole length of the string. We can trivially see the number of bits that have been set in a bitmap. + +{{< clients-example bitmap_tutorial bitcount >}} +> BITCOUNT pings:2024-01-01-00:00 +(integer) 1 +{{< /clients-example >}} + +For example imagine you want to know the longest streak of daily visits of +your web site users. You start counting days starting from zero, that is the +day you made your web site public, and set a bit with `SETBIT` every time +the user visits the web site. As a bit index you simply take the current unix +time, subtract the initial offset, and divide by the number of seconds in a day +(normally, 3600\*24). + +This way for each user you have a small string containing the visit +information for each day. With `BITCOUNT` it is possible to easily get +the number of days a given user visited the web site, while with +a few `BITPOS` calls, or simply fetching and analyzing the bitmap client-side, +it is possible to easily compute the longest streak. + +Bitmaps are trivial to split into multiple keys, for example for +the sake of sharding the data set and because in general it is better to +avoid working with huge keys. To split a bitmap across different keys +instead of setting all the bits into a key, a trivial strategy is just +to store M bits per key and obtain the key name with `bit-number/M` and +the Nth bit to address inside the key with `bit-number MOD M`. + + + +## Performance + +`SETBIT` and `GETBIT` are O(1). +`BITOP` is O(n), where _n_ is the length of the longest string in the comparison. + +## Learn more + +* [Redis Bitmaps Explained](https://www.youtube.com/watch?v=oj8LdJQjhJo) teaches you how to use bitmaps for map exploration in an online game. +* [Redis University's RU101](https://university.redis.com/courses/ru101/) covers Redis bitmaps in detail. 
diff --git a/docs/data-types/geospatial.md b/docs/data-types/geospatial.md new file mode 100644 index 0000000000..1f87de74c3 --- /dev/null +++ b/docs/data-types/geospatial.md @@ -0,0 +1,48 @@ +--- +title: "Redis geospatial" +linkTitle: "Geospatial" +weight: 80 +description: > + Introduction to the Redis Geospatial data type +--- + +Redis geospatial indexes let you store coordinates and search for them. +This data structure is useful for finding nearby points within a given radius or bounding box. + +## Basic commands + +* `GEOADD` adds a location to a given geospatial index (note that longitude comes before latitude with this command). +* `GEOSEARCH` returns locations with a given radius or a bounding box. + +See the [complete list of geospatial index commands](https://redis.io/commands/?group=geo). + + +## Examples + +Suppose you're building a mobile app that lets you find all of the bike rental stations closest to your current location. + +Add several locations to a geospatial index: +{{< clients-example geo_tutorial geoadd >}} +> GEOADD bikes:rentable -122.27652 37.805186 station:1 +(integer) 1 +> GEOADD bikes:rentable -122.2674626 37.8062344 station:2 +(integer) 1 +> GEOADD bikes:rentable -122.2469854 37.8104049 station:3 +(integer) 1 +{{< /clients-example >}} + +Find all locations within a 5 kilometer radius of a given location, and return the distance to each location: +{{< clients-example geo_tutorial geosearch >}} +> GEOSEARCH bikes:rentable FROMLONLAT -122.2612767 37.7936847 BYRADIUS 5 km WITHDIST +1) 1) "station:1" + 2) "1.8523" +2) 1) "station:2" + 2) "1.4979" +3) 1) "station:3" + 2) "2.2441" +{{< /clients-example >}} + +## Learn more + +* [Redis Geospatial Explained](https://www.youtube.com/watch?v=qftiVQraxmI) introduces geospatial indexes by showing you how to build a map of local park attractions. +* [Redis University's RU101](https://university.redis.com/courses/ru101/) covers Redis geospatial indexes in detail. 
 diff --git a/docs/data-types/hashes.md b/docs/data-types/hashes.md new file mode 100644 index 0000000000..68f9825e9d --- /dev/null +++ b/docs/data-types/hashes.md @@ -0,0 +1,106 @@ +--- +title: "Redis hashes" +linkTitle: "Hashes" +weight: 40 +description: > + Introduction to Redis hashes +--- + +Redis hashes are record types structured as collections of field-value pairs. +You can use hashes to represent basic objects and to store groupings of counters, among other things. + +{{< clients-example hash_tutorial set_get_all >}} +> HSET bike:1 model Deimos brand Ergonom type 'Enduro bikes' price 4972 +(integer) 4 +> HGET bike:1 model +"Deimos" +> HGET bike:1 price +"4972" +> HGETALL bike:1 +1) "model" +2) "Deimos" +3) "brand" +4) "Ergonom" +5) "type" +6) "Enduro bikes" +7) "price" +8) "4972" + +{{< /clients-example >}} + +While hashes are handy to represent *objects*, actually the number of fields you can +put inside a hash has no practical limits (other than available memory), so you can use +hashes in many different ways inside your application. + +The command `HSET` sets multiple fields of the hash, while `HGET` retrieves +a single field. `HMGET` is similar to `HGET` but returns an array of values: + +{{< clients-example hash_tutorial hmget >}} +> HMGET bike:1 model price no-such-field +1) "Deimos" +2) "4972" +3) (nil) +{{< /clients-example >}} + +There are commands that are able to perform operations on individual fields +as well, like `HINCRBY`: + +{{< clients-example hash_tutorial hincrby >}} +> HINCRBY bike:1 price 100 +(integer) 5072 +> HINCRBY bike:1 price -100 +(integer) 4972 +{{< /clients-example >}} + +You can find the [full list of hash commands in the documentation](https://redis.io/commands#hash). + +It is worth noting that small hashes (i.e., a few elements with small values) are +encoded in a special way in memory that makes them very memory efficient. + +## Basic commands + +* `HSET` sets the value of one or more fields on a hash. 
+* `HGET` returns the value at a given field. +* `HMGET` returns the values at one or more given fields. +* `HINCRBY` increments the value at a given field by the integer provided. + +See the [complete list of hash commands](https://redis.io/commands/?group=hash). + + +## Examples + +* Store counters for the number of times bike:1 has been ridden, has crashed, or has changed owners: +{{< clients-example hash_tutorial incrby_get_mget >}} +> HINCRBY bike:1:stats rides 1 +(integer) 1 +> HINCRBY bike:1:stats rides 1 +(integer) 2 +> HINCRBY bike:1:stats rides 1 +(integer) 3 +> HINCRBY bike:1:stats crashes 1 +(integer) 1 +> HINCRBY bike:1:stats owners 1 +(integer) 1 +> HGET bike:1:stats rides +"3" +> HMGET bike:1:stats owners crashes +1) "1" +2) "1" +{{< /clients-example >}} + + +## Performance + +Most Redis hash commands are O(1). + +A few commands - such as `HKEYS`, `HVALS`, and `HGETALL` - are O(n), where _n_ is the number of field-value pairs. + +## Limits + +Every hash can store up to 4,294,967,295 (2^32 - 1) field-value pairs. +In practice, your hashes are limited only by the overall memory on the VMs hosting your Redis deployment. + +## Learn more + +* [Redis Hashes Explained](https://www.youtube.com/watch?v=-KdITaRkQ-U) is a short, comprehensive video explainer covering Redis hashes. +* [Redis University's RU101](https://university.redis.com/courses/ru101/) covers Redis hashes in detail. \ No newline at end of file diff --git a/docs/data-types/lists.md b/docs/data-types/lists.md new file mode 100644 index 0000000000..f2275e2332 --- /dev/null +++ b/docs/data-types/lists.md @@ -0,0 +1,415 @@ +--- +title: "Redis lists" +linkTitle: "Lists" +weight: 20 +description: > + Introduction to Redis lists +--- + +Redis lists are linked lists of string values. +Redis lists are frequently used to: + +* Implement stacks and queues. +* Build queue management for background worker systems. 
+
+## Basic commands
+
+* `LPUSH` adds a new element to the head of a list; `RPUSH` adds to the tail.
+* `LPOP` removes and returns an element from the head of a list; `RPOP` does the same but from the tail of a list.
+* `LLEN` returns the length of a list.
+* `LMOVE` atomically moves elements from one list to another.
+* `LTRIM` reduces a list to the specified range of elements.
+
+### Blocking commands
+
+Lists support several blocking commands.
+For example:
+
+* `BLPOP` removes and returns an element from the head of a list.
+  If the list is empty, the command blocks until an element becomes available or until the specified timeout is reached.
+* `BLMOVE` atomically moves elements from a source list to a target list.
+  If the source list is empty, the command will block until a new element becomes available.
+
+See the [complete list of list commands](https://redis.io/commands/?group=list).
+
+## Examples
+
+* Treat a list like a queue (first in, first out):
+{{< clients-example list_tutorial queue >}}
+> LPUSH bikes:repairs bike:1
+(integer) 1
+> LPUSH bikes:repairs bike:2
+(integer) 2
+> RPOP bikes:repairs
+"bike:1"
+> RPOP bikes:repairs
+"bike:2"
+{{< /clients-example >}}
+
+* Treat a list like a stack (first in, last out):
+{{< clients-example list_tutorial stack >}}
+> LPUSH bikes:repairs bike:1
+(integer) 1
+> LPUSH bikes:repairs bike:2
+(integer) 2
+> LPOP bikes:repairs
+"bike:2"
+> LPOP bikes:repairs
+"bike:1"
+{{< /clients-example >}}
+
+* Check the length of a list:
+{{< clients-example list_tutorial llen >}}
+> LLEN bikes:repairs
+(integer) 0
+{{< /clients-example >}}
+
+* Atomically pop an element from one list and push to another:
+{{< clients-example list_tutorial lmove_lrange >}}
+> LPUSH bikes:repairs bike:1
+(integer) 1
+> LPUSH bikes:repairs bike:2
+(integer) 2
+> LMOVE bikes:repairs bikes:finished LEFT LEFT
+"bike:2"
+> LRANGE bikes:repairs 0 -1
+1) "bike:1"
+> LRANGE bikes:finished 0 -1
+1) "bike:2"
+{{< /clients-example >}}
+
+* To limit 
the length of a list you can call `LTRIM`: +{{< clients-example list_tutorial ltrim.1 >}} +> RPUSH bikes:repairs bike:1 bike:2 bike:3 bike:4 bike:5 +(integer) 5 +> LTRIM bikes:repairs 0 2 +OK +> LRANGE bikes:repairs 0 -1 +1) "bike:1" +2) "bike:2" +3) "bike:3" +{{< /clients-example >}} + +### What are Lists? +To explain the List data type it's better to start with a little bit of theory, +as the term *List* is often used in an improper way by information technology +folks. For instance "Python Lists" are not what the name may suggest (Linked +Lists), but rather Arrays (the same data type is called Array in +Ruby actually). + +From a very general point of view a List is just a sequence of ordered +elements: 10,20,1,2,3 is a list. But the properties of a List implemented using +an Array are very different from the properties of a List implemented using a +*Linked List*. + +Redis lists are implemented via Linked Lists. This means that even if you have +millions of elements inside a list, the operation of adding a new element in +the head or in the tail of the list is performed *in constant time*. The speed of adding a +new element with the `LPUSH` command to the head of a list with ten +elements is the same as adding an element to the head of list with 10 +million elements. + +What's the downside? Accessing an element *by index* is very fast in lists +implemented with an Array (constant time indexed access) and not so fast in +lists implemented by linked lists (where the operation requires an amount of +work proportional to the index of the accessed element). + +Redis Lists are implemented with linked lists because for a database system it +is crucial to be able to add elements to a very long list in a very fast way. +Another strong advantage, as you'll see in a moment, is that Redis Lists can be +taken at constant length in constant time. 
+
+When fast access to the middle of a large collection of elements is important,
+there is a different data structure that can be used, called sorted sets.
+Sorted sets are covered in the [Sorted sets](/docs/data-types/sorted-sets) tutorial page.
+
+### First steps with Redis Lists
+
+The `LPUSH` command adds a new element into a list, on the
+left (at the head), while the `RPUSH` command adds a new
+element into a list, on the right (at the tail). Finally the
+`LRANGE` command extracts ranges of elements from lists:
+
+{{< clients-example list_tutorial lpush_rpush >}}
+> RPUSH bikes:repairs bike:1
+(integer) 1
+> RPUSH bikes:repairs bike:2
+(integer) 2
+> LPUSH bikes:repairs bike:important_bike
+(integer) 3
+> LRANGE bikes:repairs 0 -1
+1) "bike:important_bike"
+2) "bike:1"
+3) "bike:2"
+{{< /clients-example >}}
+
+Note that `LRANGE` takes two indexes, the first and the last
+element of the range to return. Both the indexes can be negative, telling Redis
+to start counting from the end: so -1 is the last element, -2 is the
+penultimate element of the list, and so forth.
+
+As you can see `RPUSH` appended the elements on the right of the list, while
+the final `LPUSH` appended the element on the left.
+
+Both commands are *variadic commands*, meaning that you are free to push
+multiple elements into a list in a single call:
+
+{{< clients-example list_tutorial variadic >}}
+> RPUSH bikes:repairs bike:1 bike:2 bike:3
+(integer) 3
+> LPUSH bikes:repairs bike:important_bike bike:very_important_bike
+(integer) 5
+> LRANGE bikes:repairs 0 -1
+1) "bike:very_important_bike"
+2) "bike:important_bike"
+3) "bike:1"
+4) "bike:2"
+5) "bike:3"
+{{< /clients-example >}}
+
+An important operation defined on Redis lists is the ability to *pop elements*.
+Popping elements is the operation of both retrieving the element from the list,
+and eliminating it from the list, at the same time. You can pop elements
+from left and right, similarly to how you can push elements in both sides
+of the list. 
We'll add three elements and pop three elements, so at the end of this +sequence of commands the list is empty and there are no more elements to +pop: + +{{< clients-example list_tutorial lpop_rpop >}} +> RPUSH bikes:repairs bike:1 bike:2 bike:3 +(integer) 3 +> RPOP bikes:repairs +"bike:3" +> LPOP bikes:repairs +"bike:1" +> RPOP bikes:repairs +"bike:2" +> RPOP bikes:repairs +(nil) +{{< /clients-example >}} + +Redis returned a NULL value to signal that there are no elements in the +list. + +### Common use cases for lists + +Lists are useful for a number of tasks, two very representative use cases +are the following: + +* Remember the latest updates posted by users into a social network. +* Communication between processes, using a consumer-producer pattern where the producer pushes items into a list, and a consumer (usually a *worker*) consumes those items and executes actions. Redis has special list commands to make this use case both more reliable and efficient. + +For example both the popular Ruby libraries [resque](https://github.com/resque/resque) and +[sidekiq](https://github.com/mperham/sidekiq) use Redis lists under the hood in order to +implement background jobs. + +The popular Twitter social network [takes the latest tweets](http://www.infoq.com/presentations/Real-Time-Delivery-Twitter) +posted by users into Redis lists. + +To describe a common use case step by step, imagine your home page shows the latest +photos published in a photo sharing social network and you want to speedup access. + +* Every time a user posts a new photo, we add its ID into a list with `LPUSH`. +* When users visit the home page, we use `LRANGE 0 9` in order to get the latest 10 posted items. + +### Capped lists + +In many use cases we just want to use lists to store the *latest items*, +whatever they are: social network updates, logs, or anything else. 
+ +Redis allows us to use lists as a capped collection, only remembering the latest +N items and discarding all the oldest items using the `LTRIM` command. + +The `LTRIM` command is similar to `LRANGE`, but **instead of displaying the +specified range of elements** it sets this range as the new list value. All +the elements outside the given range are removed. + +For example, if you're adding bikes on the end of a list of repairs, but only +want to worry about the 3 that have been on the list the longest: + +{{< clients-example list_tutorial ltrim >}} +> RPUSH bikes:repairs bike:1 bike:2 bike:3 bike:4 bike:5 +(integer) 5 +> LTRIM bikes:repairs 0 2 +OK +> LRANGE bikes:repairs 0 -1 +1) "bike:1" +2) "bike:2" +3) "bike:3" +{{< /clients-example >}} + +The above `LTRIM` command tells Redis to keep just list elements from index +0 to 2, everything else will be discarded. This allows for a very simple but +useful pattern: doing a List push operation + a List trim operation together +to add a new element and discard elements exceeding a limit. Using +`LTRIM` with negative indexes can then be used to keep only the 3 most recently added: + +{{< clients-example list_tutorial ltrim_end_of_list >}} +> RPUSH bikes:repairs bike:1 bike:2 bike:3 bike:4 bike:5 +(integer) 5 +> LTRIM bikes:repairs -3 -1 +OK +> LRANGE bikes:repairs 0 -1 +1) "bike:3" +2) "bike:4" +3) "bike:5" +{{< /clients-example >}} + +The above combination adds new elements and keeps only the 3 +newest elements into the list. With `LRANGE` you can access the top items +without any need to remember very old data. + +Note: while `LRANGE` is technically an O(N) command, accessing small ranges +towards the head or the tail of the list is a constant time operation. + +Blocking operations on lists +--- + +Lists have a special feature that make them suitable to implement queues, +and in general as a building block for inter process communication systems: +blocking operations. 
+ +Imagine you want to push items into a list with one process, and use +a different process in order to actually do some kind of work with those +items. This is the usual producer / consumer setup, and can be implemented +in the following simple way: + +* To push items into the list, producers call `LPUSH`. +* To extract / process items from the list, consumers call `RPOP`. + +However it is possible that sometimes the list is empty and there is nothing +to process, so `RPOP` just returns NULL. In this case a consumer is forced to wait +some time and retry again with `RPOP`. This is called *polling*, and is not +a good idea in this context because it has several drawbacks: + +1. Forces Redis and clients to process useless commands (all the requests when the list is empty will get no actual work done, they'll just return NULL). +2. Adds a delay to the processing of items, since after a worker receives a NULL, it waits some time. To make the delay smaller, we could wait less between calls to `RPOP`, with the effect of amplifying problem number 1, i.e. more useless calls to Redis. + +So Redis implements commands called `BRPOP` and `BLPOP` which are versions +of `RPOP` and `LPOP` able to block if the list is empty: they'll return to +the caller only when a new element is added to the list, or when a user-specified +timeout is reached. + +This is an example of a `BRPOP` call we could use in the worker: + +{{< clients-example list_tutorial brpop >}} +> RPUSH bikes:repairs bike:1 bike:2 +(integer) 2 +> BRPOP bikes:repairs 1 +1) "bikes:repairs" +2) "bike:2" +> BRPOP bikes:repairs 1 +1) "bikes:repairs" +2) "bike:1" +> BRPOP bikes:repairs 1 +(nil) +(2.01s) +{{< /clients-example >}} + +It means: "wait for elements in the list `bikes:repairs`, but return if after 1 second +no element is available". 
+ +Note that you can use 0 as timeout to wait for elements forever, and you can +also specify multiple lists and not just one, in order to wait on multiple +lists at the same time, and get notified when the first list receives an +element. + +A few things to note about `BRPOP`: + +1. Clients are served in an ordered way: the first client that blocked waiting for a list, is served first when an element is pushed by some other client, and so forth. +2. The return value is different compared to `RPOP`: it is a two-element array since it also includes the name of the key, because `BRPOP` and `BLPOP` are able to block waiting for elements from multiple lists. +3. If the timeout is reached, NULL is returned. + +There are more things you should know about lists and blocking ops. We +suggest that you read more on the following: + +* It is possible to build safer queues or rotating queues using `LMOVE`. +* There is also a blocking variant of the command, called `BLMOVE`. + +## Automatic creation and removal of keys + +So far in our examples we never had to create empty lists before pushing +elements, or removing empty lists when they no longer have elements inside. +It is Redis' responsibility to delete keys when lists are left empty, or to create +an empty list if the key does not exist and we are trying to add elements +to it, for example, with `LPUSH`. + +This is not specific to lists, it applies to all the Redis data types +composed of multiple elements -- Streams, Sets, Sorted Sets and Hashes. + +Basically we can summarize the behavior with three rules: + +1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is created before adding the element. +2. When we remove elements from an aggregate data type, if the value remains empty, the key is automatically destroyed. The Stream data type is the only exception to this rule. +3. 
Calling a read-only command such as `LLEN` (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. + +Examples of rule 1: + +{{< clients-example list_tutorial rule_1 >}} +> DEL new_bikes +(integer) 0 +> LPUSH new_bikes bike:1 bike:2 bike:3 +(integer) 3 +{{< /clients-example >}} + +However we can't perform operations against the wrong type if the key exists: + +{{< clients-example list_tutorial rule_1.1 >}} +> SET new_bikes bike:1 +OK +> TYPE new_bikes +string +> LPUSH new_bikes bike:2 bike:3 +(error) WRONGTYPE Operation against a key holding the wrong kind of value +{{< /clients-example >}} + +Example of rule 2: + +{{< clients-example list_tutorial rule_2 >}} +> RPUSH bikes:repairs bike:1 bike:2 bike:3 +(integer) 3 +> EXISTS bikes:repairs +(integer) 1 +> LPOP bikes:repairs +"bike:3" +> LPOP bikes:repairs +"bike:2" +> LPOP bikes:repairs +"bike:1" +> EXISTS bikes:repairs +(integer) 0 +{{< /clients-example >}} + +The key no longer exists after all the elements are popped. + +Example of rule 3: + +{{< clients-example list_tutorial rule_3 >}} +> DEL bikes:repairs +(integer) 0 +> LLEN bikes:repairs +(integer) 0 +> LPOP bikes:repairs +(nil) +{{< /clients-example >}} + + +## Limits + +The max length of a Redis list is 2^32 - 1 (4,294,967,295) elements. + + +## Performance + +List operations that access its head or tail are O(1), which means they're highly efficient. +However, commands that manipulate elements within a list are usually O(n). +Examples of these include `LINDEX`, `LINSERT`, and `LSET`. +Exercise caution when running these commands, mainly when operating on large lists. + +## Alternatives + +Consider [Redis streams](/docs/data-types/streams) as an alternative to lists when you need to store and process an indeterminate series of events. 
+ +## Learn more + +* [Redis Lists Explained](https://www.youtube.com/watch?v=PB5SeOkkxQc) is a short, comprehensive video explainer on Redis lists. +* [Redis University's RU101](https://university.redis.com/courses/ru101/) covers Redis lists in detail. diff --git a/docs/data-types/probabilistic/hyperloglogs.md b/docs/data-types/probabilistic/hyperloglogs.md new file mode 100644 index 0000000000..89bbd11a38 --- /dev/null +++ b/docs/data-types/probabilistic/hyperloglogs.md @@ -0,0 +1,100 @@ +--- +title: "HyperLogLog" +linkTitle: "HyperLogLog" +weight: 1 +description: > + HyperLogLog is a probabilistic data structure that estimates the cardinality of a set. +aliases: + - /docs/data-types/hyperloglogs/ +--- + +HyperLogLog is a probabilistic data structure that estimates the cardinality of a set. As a probabilistic data structure, HyperLogLog trades perfect accuracy for efficient space utilization. + +The Redis HyperLogLog implementation uses up to 12 KB and provides a standard error of 0.81%. + +Counting unique items usually requires an amount of memory +proportional to the number of items you want to count, because you need +to remember the elements you have already seen in the past in order to avoid +counting them multiple times. However, a set of algorithms exist that trade +memory for precision: they return an estimated measure with a standard error, +which, in the case of the Redis implementation for HyperLogLog, is less than 1%. +The magic of this algorithm is that you no longer need to use an amount of memory +proportional to the number of items counted, and instead can use a +constant amount of memory; 12k bytes in the worst case, or a lot less if your +HyperLogLog (We'll just call them HLL from now) has seen very few elements. + +HLLs in Redis, while technically a different data structure, are encoded +as a Redis string, so you can call `GET` to serialize a HLL, and `SET` +to deserialize it back to the server. 
+ +Conceptually the HLL API is like using Sets to do the same task. You would +`SADD` every observed element into a set, and would use `SCARD` to check the +number of elements inside the set, which are unique since `SADD` will not +re-add an existing element. + +While you don't really *add items* into an HLL, because the data structure +only contains a state that does not include actual elements, the API is the +same: + +* Every time you see a new element, you add it to the count with `PFADD`. +* When you want to retrieve the current approximation of unique elements added using the `PFADD` command, you can use the `PFCOUNT` command. If you need to merge two different HLLs, the `PFMERGE` command is available. Since HLLs provide approximate counts of unique elements, the result of the merge will give you an approximation of the number of unique elements across both source HLLs. + +{{< clients-example hll_tutorial pfadd >}} +> PFADD bikes Hyperion Deimos Phoebe Quaoar +(integer) 1 +> PFCOUNT bikes +(integer) 4 +> PFADD commuter_bikes Salacia Mimas Quaoar +(integer) 1 +> PFMERGE all_bikes bikes commuter_bikes +OK +> PFCOUNT all_bikes +(integer) 6 +{{< /clients-example >}} + +Some examples of use cases for this data structure is counting unique queries +performed by users in a search form every day, number of unique visitors to a web page and other similar cases. + +Redis is also able to perform the union of HLLs, please check the +[full documentation](/commands#hyperloglog) for more information. + +## Use cases + +**Anonymous unique visits of a web page (SaaS, analytics tools)** + +This application answers these questions: + +- How many unique visits has this page had on this day? +- How many unique users have played this song? +- How many unique users have viewed this video? 
+ +{{% alert title="Note" color="warning" %}} + +Storing the IP address or any other kind of personal identifier is against the law in some countries, which makes it impossible to get unique visitor statistics on your website. + +{{% /alert %}} + +One HyperLogLog is created per page (video/song) per period, and every IP/identifier is added to it on every visit. + +## Basic commands + +* `PFADD` adds an item to a HyperLogLog. +* `PFCOUNT` returns an estimate of the number of items in the set. +* `PFMERGE` combines two or more HyperLogLogs into one. + +See the [complete list of HyperLogLog commands](https://redis.io/commands/?group=hyperloglog). + +## Performance + +Writing (`PFADD`) to and reading from (`PFCOUNT`) the HyperLogLog is done in constant time and space. +Merging HLLs is O(n), where _n_ is the number of sketches. + +## Limits + +The HyperLogLog can estimate the cardinality of sets with up to 18,446,744,073,709,551,616 (2^64) members. + +## Learn more + +* [Redis new data structure: the HyperLogLog](http://antirez.com/news/75) has a lot of details about the data structure and its implementation in Redis. +* [Redis HyperLogLog Explained](https://www.youtube.com/watch?v=MunL8nnwscQ) shows you how to use Redis HyperLogLog data structures to build a traffic heat map. + diff --git a/docs/data-types/sets.md b/docs/data-types/sets.md new file mode 100644 index 0000000000..f4f51f5e2f --- /dev/null +++ b/docs/data-types/sets.md @@ -0,0 +1,180 @@ +--- +title: "Redis sets" +linkTitle: "Sets" +weight: 30 +description: > + Introduction to Redis sets +--- + +A Redis set is an unordered collection of unique strings (members). +You can use Redis sets to efficiently: + +* Track unique items (e.g., track all unique IP addresses accessing a given blog post). +* Represent relations (e.g., the set of all users with a given role). +* Perform common set operations such as intersection, unions, and differences. + +## Basic commands + +* `SADD` adds a new member to a set. 
+* `SREM` removes the specified member from the set. +* `SISMEMBER` tests a string for set membership. +* `SINTER` returns the set of members that two or more sets have in common (i.e., the intersection). +* `SCARD` returns the size (a.k.a. cardinality) of a set. + +See the [complete list of set commands](https://redis.io/commands/?group=set). + +## Examples + +* Store the sets of bikes racing in France and the USA. Note that +if you add a member that already exists, it will be ignored. +{{< clients-example sets_tutorial sadd >}} +> SADD bikes:racing:france bike:1 +(integer) 1 +> SADD bikes:racing:france bike:1 +(integer) 0 +> SADD bikes:racing:france bike:2 bike:3 +(integer) 2 +> SADD bikes:racing:usa bike:1 bike:4 +(integer) 2 +{{< /clients-example >}} + +* Check whether bike:1 or bike:2 are racing in the US. +{{< clients-example sets_tutorial sismember >}} +> SISMEMBER bikes:racing:usa bike:1 +(integer) 1 +> SISMEMBER bikes:racing:usa bike:2 +(integer) 0 +{{< /clients-example >}} + +* Which bikes are competing in both races? +{{< clients-example sets_tutorial sinter >}} +> SINTER bikes:racing:france bikes:racing:usa +1) "bike:1" +{{< /clients-example >}} + +* How many bikes are racing in France? +{{< clients-example sets_tutorial scard >}} +> SCARD bikes:racing:france +(integer) 3 +{{< /clients-example >}} +## Tutorial + +The `SADD` command adds new elements to a set. It's also possible +to do a number of other operations against sets like testing if a given element +already exists, performing the intersection, union or difference between +multiple sets, and so forth. + +{{< clients-example sets_tutorial sadd_smembers >}} +> SADD bikes:racing:france bike:1 bike:2 bike:3 +(integer) 3 +> SMEMBERS bikes:racing:france +1) bike:3 +2) bike:1 +3) bike:2 +{{< /clients-example >}} + +Here I've added three elements to my set and told Redis to return all the +elements. There is no order guarantee with a set. Redis is free to return the +elements in any order at every call. 
+ +Redis has commands to test for set membership. These commands can be used on single as well as multiple items: + +{{< clients-example sets_tutorial smismember >}} +> SISMEMBER bikes:racing:france bike:1 +(integer) 1 +> SMISMEMBER bikes:racing:france bike:2 bike:3 bike:4 +1) (integer) 1 +2) (integer) 1 +3) (integer) 0 +{{< /clients-example >}} + +We can also find the difference between two sets. For instance, we may want +to know which bikes are racing in France but not in the USA: + +{{< clients-example sets_tutorial sdiff >}} +> SADD bikes:racing:usa bike:1 bike:4 +(integer) 2 +> SDIFF bikes:racing:france bikes:racing:usa +1) "bike:3" +2) "bike:2" +{{< /clients-example >}} + +There are other non trivial operations that are still easy to implement +using the right Redis commands. For instance we may want a list of all the +bikes racing in France, the USA, and some other races. We can do this using +the `SINTER` command, which performs the intersection between different +sets. In addition to intersection you can also perform +unions, difference, and more. For example +if we add a third race we can see some of these commands in action: + +{{< clients-example sets_tutorial multisets >}} +> SADD bikes:racing:france bike:1 bike:2 bike:3 +(integer) 3 +> SADD bikes:racing:usa bike:1 bike:4 +(integer) 2 +> SADD bikes:racing:italy bike:1 bike:2 bike:3 bike:4 +(integer) 4 +> SINTER bikes:racing:france bikes:racing:usa bikes:racing:italy +1) "bike:1" +> SUNION bikes:racing:france bikes:racing:usa bikes:racing:italy +1) "bike:2" +2) "bike:1" +3) "bike:4" +4) "bike:3" +> SDIFF bikes:racing:france bikes:racing:usa bikes:racing:italy +(empty array) +> SDIFF bikes:racing:france bikes:racing:usa +1) "bike:3" +2) "bike:2" +> SDIFF bikes:racing:usa bikes:racing:france +1) "bike:4" +{{< /clients-example >}} + +You'll note that the `SDIFF` command returns an empty array when the +difference between all sets is empty. 
You'll also note that the order of sets +passed to `SDIFF` matters, since the difference is not commutative. + +When you want to remove items from a set, you can use the `SREM` command to +remove one or more items from a set, or you can use the `SPOP` command to +remove a random item from a set. You can also _return_ a random item from a +set without removing it using the `SRANDMEMBER` command: + +{{< clients-example sets_tutorial srem >}} +> SADD bikes:racing:france bike:1 bike:2 bike:3 bike:4 bike:5 +(integer) 5 +> SREM bikes:racing:france bike:1 +(integer) 1 +> SPOP bikes:racing:france +"bike:3" +> SMEMBERS bikes:racing:france +1) "bike:2" +2) "bike:4" +3) "bike:5" +> SRANDMEMBER bikes:racing:france +"bike:2" +{{< /clients-example >}} + +## Limits + +The max size of a Redis set is 2^32 - 1 (4,294,967,295) members. + +## Performance + +Most set operations, including adding, removing, and checking whether an item is a set member, are O(1). +This means that they're highly efficient. +However, for large sets with hundreds of thousands of members or more, you should exercise caution when running the `SMEMBERS` command. +This command is O(n) and returns the entire set in a single response. +As an alternative, consider the `SSCAN`, which lets you retrieve all members of a set iteratively. + +## Alternatives + +Sets membership checks on large datasets (or on streaming data) can use a lot of memory. +If you're concerned about memory usage and don't need perfect precision, consider a [Bloom filter or Cuckoo filter](/docs/stack/bloom) as an alternative to a set. + +Redis sets are frequently used as a kind of index. +If you need to index and query your data, consider the [JSON](/docs/stack/json) data type and the [Search and query](/docs/stack/search) features. 
+ +## Learn more + +* [Redis Sets Explained](https://www.youtube.com/watch?v=PKdCppSNTGQ) and [Redis Sets Elaborated](https://www.youtube.com/watch?v=aRw5ME_5kMY) are two short but thorough video explainers covering Redis sets. +* [Redis University's RU101](https://university.redis.com/courses/ru101/) explores Redis sets in detail. diff --git a/docs/data-types/sorted-sets.md b/docs/data-types/sorted-sets.md new file mode 100644 index 0000000000..9351e2a09c --- /dev/null +++ b/docs/data-types/sorted-sets.md @@ -0,0 +1,246 @@ +--- +title: "Redis sorted sets" +linkTitle: "Sorted sets" +weight: 50 +description: > + Introduction to Redis sorted sets +--- + +A Redis sorted set is a collection of unique strings (members) ordered by an associated score. +When more than one string has the same score, the strings are ordered lexicographically. +Some use cases for sorted sets include: + +* Leaderboards. For example, you can use sorted sets to easily maintain ordered lists of the highest scores in a massive online game. +* Rate limiters. In particular, you can use a sorted set to build a sliding-window rate limiter to prevent excessive API requests. + +You can think of sorted sets as a mix between a Set and +a Hash. Like sets, sorted sets are composed of unique, non-repeating +string elements, so in some sense a sorted set is a set as well. + +However while elements inside sets are not ordered, every element in +a sorted set is associated with a floating point value, called *the score* +(this is why the type is also similar to a hash, since every element +is mapped to a value). + +Moreover, elements in a sorted set are *taken in order* (so they are not +ordered on request, order is a peculiarity of the data structure used to +represent sorted sets). They are ordered according to the following rule: + +* If B and A are two elements with a different score, then A > B if A.score is > B.score. 
+* If B and A have exactly the same score, then A > B if the A string is lexicographically greater than the B string. B and A strings can't be equal since sorted sets only have unique elements.
+
+Let's start with a simple example, we'll add all our racers and the score they got in the first race:
+
+{{< clients-example ss_tutorial zadd >}}
+> ZADD racer_scores 10 "Norem"
+(integer) 1
+> ZADD racer_scores 12 "Castilla"
+(integer) 1
+> ZADD racer_scores 8 "Sam-Bodden" 10 "Royce" 6 "Ford" 14 "Prickett"
+(integer) 4
+{{< /clients-example >}}
+
+
+As you can see `ZADD` is similar to `SADD`, but takes one additional argument
+(placed before the element to be added) which is the score.
+`ZADD` is also variadic, so you are free to specify multiple score-value
+pairs, even if this is not used in the example above.
+
+With sorted sets it is trivial to return a list of racers sorted by their
+scores because actually *they are already sorted*.
+
+Implementation note: Sorted sets are implemented via a
+dual-ported data structure containing both a skip list and a hash table, so
+every time we add an element Redis performs an O(log(N)) operation. That's
+good, but when we ask for sorted elements Redis does not have to do any work at
+all, it's already sorted. Note that the `ZRANGE` order is low to high, while the `ZREVRANGE` order is high to low:
+
+{{< clients-example ss_tutorial zrange >}}
+> ZRANGE racer_scores 0 -1
+1) "Ford"
+2) "Sam-Bodden"
+3) "Norem"
+4) "Royce"
+5) "Castilla"
+6) "Prickett"
+> ZREVRANGE racer_scores 0 -1
+1) "Prickett"
+2) "Castilla"
+3) "Royce"
+4) "Norem"
+5) "Sam-Bodden"
+6) "Ford"
+{{< /clients-example >}}
+
+Note: 0 and -1 means from element index 0 to the last element (-1 works
+here just as it does in the case of the `LRANGE` command). 
+ +It is possible to return scores as well, using the `WITHSCORES` argument: + +{{< clients-example ss_tutorial zrange_withscores >}} +> ZRANGE racer_scores 0 -1 withscores + 1) "Ford" + 2) "6" + 3) "Sam-Bodden" + 4) "8" + 5) "Norem" + 6) "10" + 7) "Royce" + 8) "10" + 9) "Castilla" +10) "12" +11) "Prickett" +12) "14" +{{< /clients-example >}} + +### Operating on ranges + +Sorted sets are more powerful than this. They can operate on ranges. +Let's get all the racers with 10 or fewer points. We +use the `ZRANGEBYSCORE` command to do it: + +{{< clients-example ss_tutorial zrangebyscore >}} +> ZRANGEBYSCORE racer_scores -inf 10 +1) "Ford" +2) "Sam-Bodden" +3) "Norem" +4) "Royce" +{{< /clients-example >}} + +We asked Redis to return all the elements with a score between negative +infinity and 10 (both extremes are included). + +To remove an element we'd simply call `ZREM` with the racer's name. +It's also possible to remove ranges of elements. Let's remove racer Castilla along with all +the racers with strictly fewer than 10 points: + +{{< clients-example ss_tutorial zremrangebyscore >}} +> ZREM racer_scores "Castilla" +(integer) 1 +> ZREMRANGEBYSCORE racer_scores -inf 9 +(integer) 2 +> ZRANGE racer_scores 0 -1 +1) "Norem" +2) "Royce" +3) "Prickett" +{{< /clients-example >}} + +`ZREMRANGEBYSCORE` is perhaps not the best command name, +but it can be very useful, and returns the number of removed elements. + +Another extremely useful operation defined for sorted set elements +is the get-rank operation. It is possible to ask what is the +position of an element in the set of ordered elements. +The `ZREVRANK` command is also available in order to get the rank, considering +the elements sorted in a descending way. 
+
+{{< clients-example ss_tutorial zrank >}}
+> ZRANK racer_scores "Norem"
+(integer) 0
+> ZREVRANK racer_scores "Norem"
+(integer) 3
+{{< /clients-example >}}
+
+### Lexicographical scores
+
+In Redis 2.8, a new feature was introduced that allows
+getting ranges lexicographically, assuming elements in a sorted set are all
+inserted with the same identical score (elements are compared with the C
+`memcmp` function, so it is guaranteed that there is no collation, and every
+Redis instance will reply with the same output).
+
+The main commands to operate with lexicographical ranges are `ZRANGEBYLEX`,
+`ZREVRANGEBYLEX`, `ZREMRANGEBYLEX` and `ZLEXCOUNT`.
+
+For example, let's add our list of racers again, but this time
+using a score of zero for all the elements. We'll see that because of the sorted set's ordering rules, they are already sorted lexicographically. Using `ZRANGEBYLEX` we can ask for lexicographical ranges:
+
+{{< clients-example ss_tutorial zadd_lex >}}
+> ZADD racer_scores 0 "Norem" 0 "Sam-Bodden" 0 "Royce" 0 "Castilla" 0 "Prickett" 0 "Ford"
+(integer) 3
+> ZRANGE racer_scores 0 -1
+1) "Castilla"
+2) "Ford"
+3) "Norem"
+4) "Prickett"
+5) "Royce"
+6) "Sam-Bodden"
+> ZRANGEBYLEX racer_scores [A [L
+1) "Castilla"
+2) "Ford"
+{{< /clients-example >}}
+
+Ranges can be inclusive or exclusive (depending on the first character).
+Also, the positive and negative infinite strings are specified with
+the `+` and `-` strings, respectively. See the documentation for more information.
+
+This feature is important because it allows us to use sorted sets as a generic
+index. For example, if you want to index elements by a 128-bit unsigned
+integer argument, all you need to do is to add elements into a sorted
+set with the same score (for example 0) but with a 16 byte prefix
+consisting of **the 128 bit number in big endian**.
Since numbers in big +endian, when ordered lexicographically (in raw bytes order) are actually +ordered numerically as well, you can ask for ranges in the 128 bit space, +and get the element's value discarding the prefix. + +If you want to see the feature in the context of a more serious demo, +check the [Redis autocomplete demo](http://autocomplete.redis.io). + +Updating the score: leaderboards +--- + +Just a final note about sorted sets before switching to the next topic. +Sorted sets' scores can be updated at any time. Just calling `ZADD` against +an element already included in the sorted set will update its score +(and position) with O(log(N)) time complexity. As such, sorted sets are suitable +when there are tons of updates. + +Because of this characteristic a common use case is leaderboards. +The typical application is a Facebook game where you combine the ability to +take users sorted by their high score, plus the get-rank operation, in order +to show the top-N users, and the user rank in the leader board (e.g., "you are +the #4932 best score here"). + +## Examples + +* There are two ways we can use a sorted set to represent a leaderboard. If we know a racer's new score, we can update it directly via the `ZADD` command. However, if we want to add points to an existing score, we can use the `ZINCRBY` command. +{{< clients-example ss_tutorial leaderboard >}} +> ZADD racer_scores 100 "Wood" +(integer) 1 +> ZADD racer_scores 100 "Henshaw" +(integer) 1 +> ZADD racer_scores 150 "Henshaw" +(integer) 0 +> ZINCRBY racer_scores 50 "Wood" +"150" +> ZINCRBY racer_scores 50 "Henshaw" +"200" +{{< /clients-example >}} + +You'll see that `ZADD` returns 0 when the member already exists (the score is updated), while `ZINCRBY` returns the new score. The score for racer Henshaw went from 100, was changed to 150 with no regard for what score was there before, and then was incremented by 50 to 200. 
+
+## Basic commands
+
+* `ZADD` adds a new member and associated score to a sorted set. If the member already exists, the score is updated.
+* `ZRANGE` returns members of a sorted set, sorted within a given range.
+* `ZRANK` returns the rank of the provided member, assuming the sorted set is in ascending order.
+* `ZREVRANK` returns the rank of the provided member, assuming the sorted set is in descending order.
+
+See the [complete list of sorted set commands](https://redis.io/commands/?group=sorted-set).
+
+## Performance
+
+Most sorted set operations are O(log(n)), where _n_ is the number of members.
+
+Exercise some caution when running the `ZRANGE` command with large return values (e.g., in the tens of thousands or more).
+This command's time complexity is O(log(n) + m), where _m_ is the number of results returned.
+
+## Alternatives
+
+Redis sorted sets are sometimes used for indexing other Redis data structures.
+If you need to index and query your data, consider the [JSON](/docs/stack/json) data type and the [Search and query](/docs/stack/search) features.
+
+## Learn more
+
+* [Redis Sorted Sets Explained](https://www.youtube.com/watch?v=MUKlxdBQZ7g) is an entertaining introduction to sorted sets in Redis.
+* [Redis University's RU101](https://university.redis.com/courses/ru101/) explores Redis sorted sets in detail.
diff --git a/docs/manual/data-types/streams.md b/docs/data-types/streams.md similarity index 73% rename from docs/manual/data-types/streams.md rename to docs/data-types/streams.md index 7388f10fde..86152a6b82 100644 --- a/docs/manual/data-types/streams.md +++ b/docs/data-types/streams.md @@ -1,38 +1,108 @@ ---- -title: "Redis streams" +--- +title: "Redis Streams" linkTitle: "Streams" -weight: 2 +weight: 60 description: > - An introduction to the Redis stream data type + Introduction to Redis streams aliases: - /topics/streams-intro + - /docs/manual/data-types/streams + - /docs/data-types/streams-tutorial/ --- -The Stream is a new data type introduced with Redis 5.0, which models a *log data structure* in a more abstract way. However the essence of a log is still intact: like a log file, often implemented as a file open in append-only mode, Redis Streams are primarily an append-only data structure. At least conceptually, because being an abstract data type represented in memory, Redis Streams implement powerful operations to overcome the limitations of a log file. - -What makes Redis streams the most complex type of Redis, despite the data structure itself being quite simple, is the fact that it implements additional, non-mandatory features: a set of blocking operations allowing consumers to wait for new data added to a stream by producers, and in addition to that a concept called **Consumer Groups**. +A Redis stream is a data structure that acts like an append-only log but also implements several operations to overcome some of the limits of a typical append-only log. These include random access in O(1) time and complex consumption strategies, such as consumer groups. +You can use streams to record and simultaneously syndicate events in real time. +Examples of Redis stream use cases include: + +* Event sourcing (e.g., tracking user actions, clicks, etc.) 
+* Sensor monitoring (e.g., readings from devices in the field) +* Notifications (e.g., storing a record of each user's notifications in a separate stream) + +Redis generates a unique ID for each stream entry. +You can use these IDs to retrieve their associated entries later or to read and process all subsequent entries in the stream. Note that because these IDs are related to time, the ones shown here may vary and will be different from the IDs you see in your own Redis instance. + +Redis streams support several trimming strategies (to prevent streams from growing unbounded) and more than one consumption strategy (see `XREAD`, `XREADGROUP`, and `XRANGE`). + +## Basic commands +* `XADD` adds a new entry to a stream. +* `XREAD` reads one or more entries, starting at a given position and moving forward in time. +* `XRANGE` returns a range of entries between two supplied entry IDs. +* `XLEN` returns the length of a stream. + +See the [complete list of stream commands](https://redis.io/commands/?group=stream). 
+ + +## Examples + +* When our racers pass a checkpoint, we add a stream entry for each racer that includes the racer's name, speed, position, and location ID: +{{< clients-example stream_tutorial xadd >}} +> XADD race:france * rider Castilla speed 30.2 position 1 location_id 1 +"1692632086370-0" +> XADD race:france * rider Norem speed 28.8 position 3 location_id 1 +"1692632094485-0" +> XADD race:france * rider Prickett speed 29.7 position 2 location_id 1 +"1692632102976-0" +{{< /clients-example >}} + +* Read two stream entries starting at ID `1692632086370-0`: +{{< clients-example stream_tutorial xrange >}} +> XRANGE race:france 1692632086370-0 + COUNT 2 +1) 1) "1692632086370-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "30.2" + 5) "position" + 6) "1" + 7) "location_id" + 8) "1" +2) 1) "1692632094485-0" + 2) 1) "rider" + 2) "Norem" + 3) "speed" + 4) "28.8" + 5) "position" + 6) "3" + 7) "location_id" + 8) "1" +{{< /clients-example >}} + +* Read up to 100 new stream entries, starting at the end of the stream, and block for up to 300 ms if no entries are being written: +{{< clients-example stream_tutorial xread_block >}} +> XREAD COUNT 100 BLOCK 300 STREAMS race:france $ +(nil) +{{< /clients-example >}} + +## Performance + +Adding an entry to a stream is O(1). +Accessing any single entry is O(n), where _n_ is the length of the ID. +Since stream IDs are typically short and of a fixed length, this effectively reduces to a constant time lookup. +For details on why, note that streams are implemented as [radix trees](https://en.wikipedia.org/wiki/Radix_tree). + +Simply put, Redis streams provide highly efficient inserts and reads. +See each command's time complexity for the details. -Consumer groups were initially introduced by the popular messaging system Kafka (TM). Redis reimplements a similar idea in completely different terms, but the goal is the same: to allow a group of clients to cooperate in consuming a different portion of the same stream of messages. 
## Streams basics -For the goal of understanding what Redis Streams are and how to use them, we will ignore all the advanced features, and instead focus on the data structure itself, in terms of commands used to manipulate and access it. This is, basically, the part which is common to most of the other Redis data types, like Lists, Sets, Sorted Sets and so forth. However, note that Lists also have an optional more complex blocking API, exported by commands like `BLPOP` and similar. So Streams are not much different than Lists in this regard, it's just that the additional API is more complex and more powerful. +Streams are an append-only data structure. The fundamental write command, called `XADD`, appends a new entry to the specified stream. -Because Streams are an append only data structure, the fundamental write command, called `XADD`, appends a new entry into the specified stream. A stream entry is not just a string, but is instead composed of one or more field-value pairs. This way, each entry of a stream is already structured, like an append only file written in CSV format where multiple separated fields are present in each line. +Each stream entry consists of one or more field-value pairs, somewhat like a dictionary or a Redis hash: -``` -> XADD mystream * sensor-id 1234 temperature 19.8 -1518951480106-0 -``` +{{< clients-example stream_tutorial xadd_2 >}} +> XADD race:france * rider Castilla speed 29.9 position 1 location_id 2 +"1692632147973-0" +{{< /clients-example >}} -The above call to the `XADD` command adds an entry `sensor-id: 1234, temperature: 19.8` to the stream at key `mystream`, using an auto-generated entry ID, which is the one returned by the command, specifically `1518951480106-0`. It gets as its first argument the key name `mystream`, the second argument is the entry ID that identifies every entry inside a stream. However, in this case, we passed `*` because we want the server to generate a new ID for us. 
Every new ID will be monotonically increasing, so in more simple terms, every new entry added will have a higher ID compared to all the past entries. Auto-generation of IDs by the server is almost always what you want, and the reasons for specifying an ID explicitly are very rare. We'll talk more about this later. The fact that each Stream entry has an ID is another similarity with log files, where line numbers, or the byte offset inside the file, can be used in order to identify a given entry. Returning back at our `XADD` example, after the key name and ID, the next arguments are the field-value pairs composing our stream entry. +The above call to the `XADD` command adds an entry `rider: Castilla, speed: 29.9, position: 1, location_id: 2` to the stream at key `race:france`, using an auto-generated entry ID, which is the one returned by the command, specifically `1692632147973-0`. It gets as its first argument the key name `race:france`, the second argument is the entry ID that identifies every entry inside a stream. However, in this case, we passed `*` because we want the server to generate a new ID for us. Every new ID will be monotonically increasing, so in more simple terms, every new entry added will have a higher ID compared to all the past entries. Auto-generation of IDs by the server is almost always what you want, and the reasons for specifying an ID explicitly are very rare. We'll talk more about this later. The fact that each Stream entry has an ID is another similarity with log files, where line numbers, or the byte offset inside the file, can be used in order to identify a given entry. Returning back at our `XADD` example, after the key name and ID, the next arguments are the field-value pairs composing our stream entry. 
It is possible to get the number of items inside a Stream just using the `XLEN` command: -``` -> XLEN mystream -(integer) 1 -``` +{{< clients-example stream_tutorial xlen >}} +> XLEN race:france +(integer) 4 +{{< /clients-example >}} ### Entry IDs @@ -48,26 +118,26 @@ The format of such IDs may look strange at first, and the gentle reader may wond If for some reason the user needs incremental IDs that are not related to time but are actually associated to another external system ID, as previously mentioned, the `XADD` command can take an explicit ID instead of the `*` wildcard ID that triggers auto-generation, like in the following examples: -``` -> XADD somestream 0-1 field value +{{< clients-example stream_tutorial xadd_id >}} +> XADD race:usa 0-1 racer Castilla 0-1 -> XADD somestream 0-2 foo bar +> XADD race:usa 0-2 racer Norem 0-2 -``` +{{< /clients-example >}} Note that in this case, the minimum ID is 0-1 and that the command will not accept an ID equal or smaller than a previous one: -``` -> XADD somestream 0-1 foo bar +{{< clients-example stream_tutorial xadd_bad_id >}} +> XADD race:usa 0-1 racer Prickett (error) ERR The ID specified in XADD is equal or smaller than the target stream top item -``` +{{< /clients-example >}} -It is also possible to use an explicit ID that only consists of the milliseconds part, and have the sequence part be automatically generated for the entry: +If you're running Redis 7 or later, you can also provide an explicit ID consisting of the milliseconds part only. In this case, the sequence portion of the ID will be automatically generated. 
To do this, use the syntax below: -``` -> XADD somestream 0-* baz qux +{{< clients-example stream_tutorial xadd_7 >}} +> XADD race:usa 0-* racer Prickett 0-3 -``` +{{< /clients-example >}} ## Getting data from Streams @@ -83,65 +153,132 @@ Redis Streams support all three of the query modes described above via different To query the stream by range we are only required to specify two IDs, *start* and *end*. The range returned will include the elements having start or end as ID, so the range is inclusive. The two special IDs `-` and `+` respectively mean the smallest and the greatest ID possible. -``` -> XRANGE mystream - + -1) 1) 1518951480106-0 - 2) 1) "sensor-id" - 2) "1234" - 3) "temperature" - 4) "19.8" -2) 1) 1518951482479-0 - 2) 1) "sensor-id" - 2) "9999" - 3) "temperature" - 4) "18.2" -``` +{{< clients-example stream_tutorial xrange_all >}} +> XRANGE race:france - + +1) 1) "1692632086370-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "30.2" + 5) "position" + 6) "1" + 7) "location_id" + 8) "1" +2) 1) "1692632094485-0" + 2) 1) "rider" + 2) "Norem" + 3) "speed" + 4) "28.8" + 5) "position" + 6) "3" + 7) "location_id" + 8) "1" +3) 1) "1692632102976-0" + 2) 1) "rider" + 2) "Prickett" + 3) "speed" + 4) "29.7" + 5) "position" + 6) "2" + 7) "location_id" + 8) "1" +4) 1) "1692632147973-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "29.9" + 5) "position" + 6) "1" + 7) "location_id" + 8) "2" +{{< /clients-example >}} Each entry returned is an array of two items: the ID and the list of field-value pairs. We already said that the entry IDs have a relation with the time, because the part at the left of the `-` character is the Unix time in milliseconds of the local node that created the stream entry, at the moment the entry was created (however note that streams are replicated with fully specified `XADD` commands, so the replicas will have identical IDs to the master). This means that I could query a range of time using `XRANGE`. 
In order to do so, however, I may want to omit the sequence part of the ID: if omitted, in the start of the range it will be assumed to be 0, while in the end part it will be assumed to be the maximum sequence number available. This way, querying using just two milliseconds Unix times, we get all the entries that were generated in that range of time, in an inclusive way. For instance, if I want to query a two milliseconds period I could use: -``` -> XRANGE mystream 1518951480106 1518951480107 -1) 1) 1518951480106-0 - 2) 1) "sensor-id" - 2) "1234" - 3) "temperature" - 4) "19.8" -``` - -I have only a single entry in this range, however in real data sets, I could query for ranges of hours, or there could be many items in just two milliseconds, and the result returned could be huge. For this reason, `XRANGE` supports an optional **COUNT** option at the end. By specifying a count, I can just get the first *N* items. If I want more, I can get the last ID returned, increment the sequence part by one, and query again. Let's see this in the following example. We start adding 10 items with `XADD` (I won't show that, lets assume that the stream `mystream` was populated with 10 items). To start my iteration, getting 2 items per command, I start with the full range, but with a count of 2. - -``` -> XRANGE mystream - + COUNT 2 -1) 1) 1519073278252-0 - 2) 1) "foo" - 2) "value_1" -2) 1) 1519073279157-0 - 2) 1) "foo" - 2) "value_2" -``` - -In order to continue the iteration with the next two items, I have to pick the last ID returned, that is `1519073279157-0` and add the prefix `(` to it. The resulting exclusive range interval, that is `(1519073279157-0` in this case, can now be used as the new *start* argument for the next `XRANGE` call: - -``` -> XRANGE mystream (1519073279157-0 + COUNT 2 -1) 1) 1519073280281-0 - 2) 1) "foo" - 2) "value_3" -2) 1) 1519073281432-0 - 2) 1) "foo" - 2) "value_4" -``` - -And so forth. 
Since `XRANGE` complexity is *O(log(N))* to seek, and then *O(M)* to return M elements, with a small count the command has a logarithmic time complexity, which means that each step of the iteration is fast. So `XRANGE` is also the de facto *streams iterator* and does not require an **XSCAN** command. +{{< clients-example stream_tutorial xrange_time >}} +> XRANGE race:france 1692632086369 1692632086371 +1) 1) "1692632086370-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "30.2" + 5) "position" + 6) "1" + 7) "location_id" + 8) "1" +{{< /clients-example >}} + +I have only a single entry in this range. However in real data sets, I could query for ranges of hours, or there could be many items in just two milliseconds, and the result returned could be huge. For this reason, `XRANGE` supports an optional **COUNT** option at the end. By specifying a count, I can just get the first *N* items. If I want more, I can get the last ID returned, increment the sequence part by one, and query again. Let's see this in the following example. Let's assume that the stream `race:france` was populated with 4 items. To start my iteration, getting 2 items per command, I start with the full range, but with a count of 2. + +{{< clients-example stream_tutorial xrange_step_1 >}} +> XRANGE race:france - + COUNT 2 +1) 1) "1692632086370-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "30.2" + 5) "position" + 6) "1" + 7) "location_id" + 8) "1" +2) 1) "1692632094485-0" + 2) 1) "rider" + 2) "Norem" + 3) "speed" + 4) "28.8" + 5) "position" + 6) "3" + 7) "location_id" + 8) "1" +{{< /clients-example >}} + +To continue the iteration with the next two items, I have to pick the last ID returned, that is `1692632094485-0`, and add the prefix `(` to it. 
The resulting exclusive range interval, that is `(1692632094485-0` in this case, can now be used as the new *start* argument for the next `XRANGE` call: + +{{< clients-example stream_tutorial xrange_step_2 >}} +> XRANGE race:france (1692632094485-0 + COUNT 2 +1) 1) "1692632102976-0" + 2) 1) "rider" + 2) "Prickett" + 3) "speed" + 4) "29.7" + 5) "position" + 6) "2" + 7) "location_id" + 8) "1" +2) 1) "1692632147973-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "29.9" + 5) "position" + 6) "1" + 7) "location_id" + 8) "2" +{{< /clients-example >}} + +Now that we've retrieved 4 items out of a stream that only had 4 entries in it, if we try to retrieve more items, we'll get an empty array: + +{{< clients-example stream_tutorial xrange_empty >}} +> XRANGE race:france (1692632147973-0 + COUNT 2 +(empty array) +{{< /clients-example >}} + +Since `XRANGE` complexity is *O(log(N))* to seek, and then *O(M)* to return M elements, with a small count the command has a logarithmic time complexity, which means that each step of the iteration is fast. So `XRANGE` is also the de facto *streams iterator* and does not require an **XSCAN** command. The command `XREVRANGE` is the equivalent of `XRANGE` but returning the elements in inverted order, so a practical use for `XREVRANGE` is to check what is the last item in a Stream: -``` -> XREVRANGE mystream + - COUNT 1 -1) 1) 1519073287312-0 - 2) 1) "foo" - 2) "value_10" -``` +{{< clients-example stream_tutorial xrevrange >}} +> XREVRANGE race:france + - COUNT 1 +1) 1) "1692632147973-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "29.9" + 5) "position" + 6) "1" + 7) "location_id" + 8) "2" +{{< /clients-example >}} Note that the `XREVRANGE` command takes the *start* and *stop* arguments in reverse order. @@ -155,25 +292,38 @@ When we do not want to access items by a range in a stream, usually what we want The command that provides the ability to listen for new messages arriving into a stream is called `XREAD`. 
It's a bit more complex than `XRANGE`, so we'll start showing simple forms, and later the whole command layout will be provided. -``` -> XREAD COUNT 2 STREAMS mystream 0 -1) 1) "mystream" - 2) 1) 1) 1519073278252-0 - 2) 1) "foo" - 2) "value_1" - 2) 1) 1519073279157-0 - 2) 1) "foo" - 2) "value_2" -``` +{{< clients-example stream_tutorial xread >}} +> XREAD COUNT 2 STREAMS race:france 0 +1) 1) "race:france" + 2) 1) 1) "1692632086370-0" + 2) 1) "rider" + 2) "Castilla" + 3) "speed" + 4) "30.2" + 5) "position" + 6) "1" + 7) "location_id" + 8) "1" + 2) 1) "1692632094485-0" + 2) 1) "rider" + 2) "Norem" + 3) "speed" + 4) "28.8" + 5) "position" + 6) "3" + 7) "location_id" + 8) "1" +{{< /clients-example >}} The above is the non-blocking form of `XREAD`. Note that the **COUNT** option is not mandatory, in fact the only mandatory option of the command is the **STREAMS** option, that specifies a list of keys together with the corresponding maximum ID already seen for each stream by the calling consumer, so that the command will provide the client only with messages with an ID greater than the one we specified. -In the above command we wrote `STREAMS mystream 0` so we want all the messages in the Stream `mystream` having an ID greater than `0-0`. As you can see in the example above, the command returns the key name, because actually it is possible to call this command with more than one key to read from different streams at the same time. I could write, for instance: `STREAMS mystream otherstream 0 0`. Note how after the **STREAMS** option we need to provide the key names, and later the IDs. For this reason, the **STREAMS** option must always be the last one. +In the above command we wrote `STREAMS race:france 0` so we want all the messages in the Stream `race:france` having an ID greater than `0-0`. 
As you can see in the example above, the command returns the key name, because actually it is possible to call this command with more than one key to read from different streams at the same time. I could write, for instance: `STREAMS race:france race:italy 0 0`. Note how after the **STREAMS** option we need to provide the key names, and later the IDs. For this reason, the **STREAMS** option must always be the last option. +Any other options must come before the **STREAMS** option. Apart from the fact that `XREAD` can access multiple streams at once, and that we are able to specify the last ID we own to just get newer messages, in this simple form the command is not doing something so different compared to `XRANGE`. However, the interesting part is that we can turn `XREAD` into a *blocking command* easily, by specifying the **BLOCK** argument: ``` -> XREAD BLOCK 0 STREAMS mystream $ +> XREAD BLOCK 0 STREAMS race:france $ ``` Note that in the example above, other than removing **COUNT**, I specified the new **BLOCK** option with a timeout of 0 milliseconds (that means to never timeout). Moreover, instead of passing a normal ID for the stream `mystream` I passed the special ID `$`. This special ID means that `XREAD` should use as last ID the maximum ID already stored in the stream `mystream`, so that we will receive only *new* messages, starting from the time we started listening. This is similar to the `tail -f` Unix command in some way. @@ -239,52 +389,46 @@ Now it's time to zoom in to see the fundamental consumer group commands. 
They ar ## Creating a consumer group -Assuming I have a key `mystream` of type stream already existing, in order to create a consumer group I just need to do the following: +Assuming I have a key `race:france` of type stream already existing, in order to create a consumer group I just need to do the following: -``` -> XGROUP CREATE mystream mygroup $ +{{< clients-example stream_tutorial xgroup_create >}} +> XGROUP CREATE race:france france_riders $ OK -``` +{{< /clients-example >}} As you can see in the command above when creating the consumer group we have to specify an ID, which in the example is just `$`. This is needed because the consumer group, among the other states, must have an idea about what message to serve next at the first consumer connecting, that is, what was the *last message ID* when the group was just created. If we provide `$` as we did, then only new messages arriving in the stream from now on will be provided to the consumers in the group. If we specify `0` instead the consumer group will consume *all* the messages in the stream history to start with. Of course, you can specify any other valid ID. What you know is that the consumer group will start delivering messages that are greater than the ID you specify. Because `$` means the current greatest ID in the stream, specifying `$` will have the effect of consuming only new messages. `XGROUP CREATE` also supports creating the stream automatically, if it doesn't exist, using the optional `MKSTREAM` subcommand as the last argument: -``` -> XGROUP CREATE newstream mygroup $ MKSTREAM +{{< clients-example stream_tutorial xgroup_create_mkstream >}} +> XGROUP CREATE race:italy italy_riders $ MKSTREAM OK -``` +{{< /clients-example >}} Now that the consumer group is created we can immediately try to read messages via the consumer group using the `XREADGROUP` command. We'll read from consumers, that we will call Alice and Bob, to see how the system will return different messages to Alice or Bob. 
`XREADGROUP` is very similar to `XREAD` and provides the same **BLOCK** option, otherwise it is a synchronous command. However there is a *mandatory* option that must be always specified, which is **GROUP** and has two arguments: the name of the consumer group, and the name of the consumer that is attempting to read. The option **COUNT** is also supported and is identical to the one in `XREAD`. -Before reading from the stream, let's put some messages inside: - -``` -> XADD mystream * message apple -1526569495631-0 -> XADD mystream * message orange -1526569498055-0 -> XADD mystream * message strawberry -1526569506935-0 -> XADD mystream * message apricot -1526569535168-0 -> XADD mystream * message banana -1526569544280-0 -``` - -Note: *here message is the field name, and the fruit is the associated value, remember that stream items are small dictionaries.* - -It is time to try reading something using the consumer group: - -``` -> XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream > -1) 1) "mystream" - 2) 1) 1) 1526569495631-0 - 2) 1) "message" - 2) "apple" -``` +We'll add riders to the race:italy stream and try reading something using the consumer group: +Note: *here rider is the field name, and the name is the associated value. Remember that stream items are small dictionaries.* + +{{< clients-example stream_tutorial xgroup_read >}} +> XADD race:italy * rider Castilla +"1692632639151-0" +> XADD race:italy * rider Royce +"1692632647899-0" +> XADD race:italy * rider Sam-Bodden +"1692632662819-0" +> XADD race:italy * rider Prickett +"1692632670501-0" +> XADD race:italy * rider Norem +"1692632678249-0" +> XREADGROUP GROUP italy_riders Alice COUNT 1 STREAMS race:italy > +1) 1) "race:italy" + 2) 1) 1) "1692632639151-0" + 2) 1) "rider" + 2) "Castilla" +{{< /clients-example >}} `XREADGROUP` replies are just like `XREAD` replies. Note however the `GROUP ` provided above. 
It states that I want to read from the stream using the consumer group `mygroup` and I'm the consumer `Alice`. Every time a consumer performs an operation with a consumer group, it must specify its name, uniquely identifying this consumer inside the group. @@ -295,42 +439,42 @@ This is almost always what you want, however it is also possible to specify a re * If the ID is the special ID `>` then the command will return only new messages never delivered to other consumers so far, and as a side effect, will update the consumer group's *last ID*. * If the ID is any other valid numerical ID, then the command will let us access our *history of pending messages*. That is, the set of messages that were delivered to this specified consumer (identified by the provided name), and never acknowledged so far with `XACK`. -We can test this behavior immediately specifying an ID of 0, without any **COUNT** option: we'll just see the only pending message, that is, the one about apples: +We can test this behavior immediately specifying an ID of 0, without any **COUNT** option: we'll just see the only pending message, that is, the one about Castilla: -``` -> XREADGROUP GROUP mygroup Alice STREAMS mystream 0 -1) 1) "mystream" - 2) 1) 1) 1526569495631-0 - 2) 1) "message" - 2) "apple" -``` +{{< clients-example stream_tutorial xgroup_read_id >}} +> XREADGROUP GROUP italy_riders Alice STREAMS race:italy 0 +1) 1) "race:italy" + 2) 1) 1) "1692632639151-0" + 2) 1) "rider" + 2) "Castilla" +{{< /clients-example >}} However, if we acknowledge the message as processed, it will no longer be part of the pending messages history, so the system will no longer report anything: -``` -> XACK mystream mygroup 1526569495631-0 +{{< clients-example stream_tutorial xack >}} +> XACK race:italy italy_riders 1692632639151-0 (integer) 1 -> XREADGROUP GROUP mygroup Alice STREAMS mystream 0 -1) 1) "mystream" - 2) (empty list or set) -``` +> XREADGROUP GROUP italy_riders Alice STREAMS race:italy 0 +1) 1) 
"race:italy" + 2) (empty array) +{{< /clients-example >}} Don't worry if you yet don't know how `XACK` works, the idea is just that processed messages are no longer part of the history that we can access. Now it's Bob's turn to read something: -``` -> XREADGROUP GROUP mygroup Bob COUNT 2 STREAMS mystream > -1) 1) "mystream" - 2) 1) 1) 1526569498055-0 - 2) 1) "message" - 2) "orange" - 2) 1) 1526569506935-0 - 2) 1) "message" - 2) "strawberry" -``` +{{< clients-example stream_tutorial xgroup_read_bob >}} +> XREADGROUP GROUP italy_riders Bob COUNT 2 STREAMS race:italy > +1) 1) "race:italy" + 2) 1) 1) "1692632647899-0" + 2) 1) "rider" + 2) "Royce" + 2) 1) "1692632662819-0" + 2) 1) "rider" + 2) "Sam-Bodden" +{{< /clients-example >}} -Bob asked for a maximum of two messages and is reading via the same group `mygroup`. So what happens is that Redis reports just *new* messages. As you can see the "apple" message is not delivered, since it was already delivered to Alice, so Bob gets orange and strawberry, and so forth. +Bob asked for a maximum of two messages and is reading via the same group `italy_riders`. So what happens is that Redis reports just *new* messages. As you can see the "Castilla" message is not delivered, since it was already delivered to Alice, so Bob gets Royce and Sam-Bodden and so forth. This way Alice, Bob, and any other consumer in the group, are able to read different messages from the same stream, to read their history of yet to process messages, or to mark messages as processed. This allows creating different topologies and semantics for consuming messages from a stream. @@ -411,14 +555,14 @@ The first step of this process is just a command that provides observability of This is a read-only command which is always safe to call and will not change ownership of any message. In its simplest form, the command is called with two arguments, which are the name of the stream and the name of the consumer group. 
-``` -> XPENDING mystream mygroup +{{< clients-example stream_tutorial xpending >}} +> XPENDING race:italy italy_riders 1) (integer) 2 -2) 1526569498055-0 -3) 1526569506935-0 +2) "1692632647899-0" +3) "1692632662819-0" 4) 1) 1) "Bob" 2) "2" -``` +{{< /clients-example >}} When called in this way, the command outputs the total number of pending messages in the consumer group (two in this case), the lower and higher message ID among the pending messages, and finally a list of consumers and the number of pending messages they have. We have only Bob with two pending messages because the single message that Alice requested was acknowledged using `XACK`. @@ -431,31 +575,31 @@ XPENDING [[IDLE ] [ By providing a start and end ID (that can be just `-` and `+` as in `XRANGE`) and a count to control the amount of information returned by the command, we are able to know more about the pending messages. The optional final argument, the consumer name, is used if we want to limit the output to just messages pending for a given consumer, but won't use this feature in the following example. -``` -> XPENDING mystream mygroup - + 10 -1) 1) 1526569498055-0 +{{< clients-example stream_tutorial xpending_plus_minus >}} +> XPENDING race:italy italy_riders - + 10 +1) 1) "1692632647899-0" 2) "Bob" - 3) (integer) 74170458 + 3) (integer) 74642 4) (integer) 1 -2) 1) 1526569506935-0 +2) 1) "1692632662819-0" 2) "Bob" - 3) (integer) 74170458 + 3) (integer) 74642 4) (integer) 1 -``` +{{< /clients-example >}} Now we have the details for each message: the ID, the consumer name, the *idle time* in milliseconds, which is how many milliseconds have passed since the last time the message was delivered to some consumer, and finally the number of times that a given message was delivered. -We have two messages from Bob, and they are idle for 74170458 milliseconds, about 20 hours. +We have two messages from Bob, and they are idle for 60000+ milliseconds, about a minute. 
Note that nobody prevents us from checking what the first message content was by just using `XRANGE`. -``` -> XRANGE mystream 1526569498055-0 1526569498055-0 -1) 1) 1526569498055-0 - 2) 1) "message" - 2) "orange" -``` +{{< clients-example stream_tutorial xrange_pending >}} +> XRANGE race:italy 1692632647899-0 1692632647899-0 +1) 1) "1692632647899-0" + 2) 1) "rider" + 2) "Royce" +{{< /clients-example >}} -We have just to repeat the same ID twice in the arguments. Now that we have some ideas, Alice may decide that after 20 hours of not processing messages, Bob will probably not recover in time, and it's time to *claim* such messages and resume the processing in place of Bob. To do so, we use the `XCLAIM` command. +We just have to repeat the same ID twice in the arguments. Now that we have some ideas, Alice may decide that after 1 minute of not processing messages, Bob will probably not recover quickly, and it's time to *claim* such messages and resume the processing in place of Bob. To do so, we use the `XCLAIM` command. This command is very complex and full of options in its full form, since it is used for replication of consumer groups changes, but we'll use just the arguments that we need normally. In this case it is as simple as: @@ -466,20 +610,20 @@ XCLAIM ... Basically we say, for this specific key and group, I want that the message IDs specified will change ownership, and will be assigned to the specified consumer name `<consumer>`. However, we also provide a minimum idle time, so that the operation will only work if the idle time of the mentioned messages is greater than the specified idle time. 
This is useful because maybe two clients are retrying to claim a message at the same time: ``` -Client 1: XCLAIM mystream mygroup Alice 3600000 1526569498055-0 -Client 2: XCLAIM mystream mygroup Lora 3600000 1526569498055-0 +Client 1: XCLAIM race:italy italy_riders Alice 60000 1692632647899-0 +Client 2: XCLAIM race:italy italy_riders Lora 60000 1692632647899-0 ``` However, as a side effect, claiming a message will reset its idle time and will increment its number of deliveries counter, so the second client will fail claiming it. In this way we avoid trivial re-processing of messages (even if in the general case you cannot obtain exactly once processing). This is the result of the command execution: -``` -> XCLAIM mystream mygroup Alice 3600000 1526569498055-0 -1) 1) 1526569498055-0 - 2) 1) "message" - 2) "orange" -``` +{{< clients-example stream_tutorial xclaim >}} +> XCLAIM race:italy italy_riders Alice 60000 1692632647899-0 +1) 1) "1692632647899-0" + 2) 1) "rider" + 2) "Royce" +{{< /clients-example >}} The message was successfully claimed by Alice, who can now process the message and acknowledge it, and move things forward even if the original consumer is not recovering. @@ -502,24 +646,25 @@ XAUTOCLAIM [COUNT count] [JUSTI So, in the example above, I could have used automatic claiming to claim a single message like this: -``` -> XAUTOCLAIM mystream mygroup Alice 3600000 0-0 COUNT 1 -1) 1526569498055-0 -2) 1) 1526569498055-0 - 2) 1) "message" - 2) "orange" -``` +{{< clients-example stream_tutorial xautoclaim >}} +> XAUTOCLAIM race:italy italy_riders Alice 60000 0-0 COUNT 1 +1) "0-0" +2) 1) 1) "1692632662819-0" + 2) 1) "rider" + 2) "Sam-Bodden" +{{< /clients-example >}} Like `XCLAIM`, the command replies with an array of the claimed messages, but it also returns a stream ID that allows iterating the pending entries. 
The stream ID is a cursor, and I can use it in my next call to continue in claiming idle pending messages: -``` -> XAUTOCLAIM mystream mygroup Lora 3600000 1526569498055-0 COUNT 1 -1) 0-0 -2) 1) 1526569506935-0 - 2) 1) "message" - 2) "strawberry" -``` +{{< clients-example stream_tutorial xautoclaim_cursor >}} +> XAUTOCLAIM race:italy italy_riders Lora 60000 (1692632662819-0 COUNT 1 +1) "1692632662819-0" +2) 1) 1) "1692632647899-0" + 2) 1) "rider" + 2) "Royce" +{{< /clients-example >}} + When `XAUTOCLAIM` returns the "0-0" stream ID as a cursor, that means that it reached the end of the consumer group pending entries list. That doesn't mean that there are no new idle pending messages, so the process continues by calling `XAUTOCLAIM` from the beginning of the stream. @@ -537,81 +682,67 @@ However we may want to do more than that, and the `XINFO` command is an observab This command uses subcommands in order to show different information about the status of the stream and its consumer groups. For instance **XINFO STREAM ** reports information about the stream itself. -``` -> XINFO STREAM mystream +{{< clients-example stream_tutorial xinfo >}} +> XINFO STREAM race:italy 1) "length" - 2) (integer) 2 + 2) (integer) 5 3) "radix-tree-keys" 4) (integer) 1 5) "radix-tree-nodes" 6) (integer) 2 7) "last-generated-id" - 8) "1638125141232-0" - 9) "max-deleted-entryid" -10) "0-0" -11) "entries-added" -12) (integer) 2 -13) "groups" -14) (integer) 1 -15) "first-entry" -16) 1) "1638125133432-0" - 2) 1) "message" - 2) "apple" -17) "last-entry" -18) 1) "1638125141232-0" - 2) 1) "message" - 2) "banana" -``` + 8) "1692632678249-0" + 9) "groups" +10) (integer) 1 +11) "first-entry" +12) 1) "1692632639151-0" + 2) 1) "rider" + 2) "Castilla" +13) "last-entry" +14) 1) "1692632678249-0" + 2) 1) "rider" + 2) "Norem" +{{< /clients-example >}} The output shows information about how the stream is encoded internally, and also shows the first and last message in the stream. 
Another piece of information available is the number of consumer groups associated with this stream. We can dig further asking for more information about the consumer groups. -``` -> XINFO GROUPS mystream -1) 1) "name" - 2) "mygroup" - 3) "consumers" - 4) (integer) 2 - 5) "pending" - 6) (integer) 2 - 7) "last-delivered-id" - 8) "1638126030001-0" - 9) "entries-read" - 10) (integer) 2 - 11) "lag" - 12) (integer) 0 -2) 1) "name" - 2) "some-other-group" - 3) "consumers" - 4) (integer) 1 - 5) "pending" - 6) (integer) 0 - 7) "last-delivered-id" - 8) "1638126028070-0" - 9) "entries-read" - 10) (integer) 1 - 11) "lag" - 12) (integer) 1 -``` +{{< clients-example stream_tutorial xinfo_groups >}} +> XINFO GROUPS race:italy +1) 1) "name" + 2) "italy_riders" + 3) "consumers" + 4) (integer) 3 + 5) "pending" + 6) (integer) 2 + 7) "last-delivered-id" + 8) "1692632662819-0" +{{< /clients-example >}} As you can see in this and in the previous output, the `XINFO` command outputs a sequence of field-value items. Because it is an observability command this allows the human user to immediately understand what information is reported, and allows the command to report more information in the future by adding more fields without breaking compatibility with older clients. Other commands that must be more bandwidth efficient, like `XPENDING`, just report the information without the field names. The output of the example above, where the **GROUPS** subcommand is used, should be clear observing the field names. We can check in more detail the state of a specific consumer group by checking the consumers that are registered in the group. 
-``` -> XINFO CONSUMERS mystream mygroup -1) 1) name +{{< clients-example stream_tutorial xinfo_consumers >}} +> XINFO CONSUMERS race:italy italy_riders +1) 1) "name" 2) "Alice" - 3) pending + 3) "pending" 4) (integer) 1 - 5) idle - 6) (integer) 9104628 -2) 1) name + 5) "idle" + 6) (integer) 177546 +2) 1) "name" 2) "Bob" - 3) pending + 3) "pending" + 4) (integer) 0 + 5) "idle" + 6) (integer) 424686 +3) 1) "name" + 2) "Lora" + 3) "pending" 4) (integer) 1 - 5) idle - 6) (integer) 83841983 -``` + 5) "idle" + 6) (integer) 72241 +{{< /clients-example >}} In case you do not remember the syntax of the command, just ask the command itself for help: @@ -648,45 +779,47 @@ So basically Kafka partitions are more similar to using N different Redis keys, Many applications do not want to collect data into a stream forever. Sometimes it is useful to have at maximum a given number of items inside a stream, other times once a given size is reached, it is useful to move data from Redis to a storage which is not in memory and not as fast but suited to store the history for, potentially, decades to come. Redis streams have some support for this. One is the **MAXLEN** option of the `XADD` command. 
This option is very simple to use: -``` -> XADD mystream MAXLEN 2 * value 1 -1526654998691-0 -> XADD mystream MAXLEN 2 * value 2 -1526654999635-0 -> XADD mystream MAXLEN 2 * value 3 -1526655000369-0 -> XLEN mystream +{{< clients-example stream_tutorial maxlen >}} +> XADD race:italy MAXLEN 2 * rider Jones +"1692633189161-0" +> XADD race:italy MAXLEN 2 * rider Wood +"1692633198206-0" +> XADD race:italy MAXLEN 2 * rider Henshaw +"1692633208557-0" +> XLEN race:italy (integer) 2 -> XRANGE mystream - + -1) 1) 1526654999635-0 - 2) 1) "value" - 2) "2" -2) 1) 1526655000369-0 - 2) 1) "value" - 2) "3" -``` +> XRANGE race:italy - + +1) 1) "1692633198206-0" + 2) 1) "rider" + 2) "Wood" +2) 1) "1692633208557-0" + 2) 1) "rider" + 2) "Henshaw" +{{< /clients-example >}} Using **MAXLEN** the old entries are automatically evicted when the specified length is reached, so that the stream is left at a constant size. There is currently no option to tell the stream to just retain items that are not older than a given period, because such command, in order to run consistently, would potentially block for a long time in order to evict items. Imagine for example what happens if there is an insertion spike, then a long pause, and another insertion, all with the same maximum time. The stream would block to evict the data that became too old during the pause. So it is up to the user to do some planning and understand what is the maximum stream length desired. Moreover, while the length of the stream is proportional to the memory used, trimming by time is less simple to control and anticipate: it depends on the insertion rate which often changes over time (and when it does not change, then to just trim by size is trivial). However trimming with **MAXLEN** can be expensive: streams are represented by macro nodes into a radix tree, in order to be very memory efficient. Altering the single macro node, consisting of a few tens of elements, is not optimal. 
So it's possible to use the command in the following special form: ``` -XADD mystream MAXLEN ~ 1000 * ... entry fields here ... +XADD race:italy MAXLEN ~ 1000 * ... entry fields here ... ``` -The `~` argument between the **MAXLEN** option and the actual count means, I don't really need this to be exactly 1000 items. It can be 1000 or 1010 or 1030, just make sure to save at least 1000 items. With this argument, the trimming is performed only when we can remove a whole node. This makes it much more efficient, and it is usually what you want. +The `~` argument between the **MAXLEN** option and the actual count means, I don't really need this to be exactly 1000 items. It can be 1000 or 1010 or 1030, just make sure to save at least 1000 items. With this argument, the trimming is performed only when we can remove a whole node. This makes it much more efficient, and it is usually what you want. You'll note here that the client libraries have various implementations of this. For example, the Python client defaults to approximate and has to be explicitly set to a true length. There is also the `XTRIM` command, which performs something very similar to what the **MAXLEN** option does above, except that it can be run by itself: -``` -> XTRIM mystream MAXLEN 10 -``` +{{< clients-example stream_tutorial xtrim >}} +> XTRIM race:italy MAXLEN 10 +(integer) 0 +{{< /clients-example >}} Or, as for the `XADD` option: -``` +{{< clients-example stream_tutorial xtrim2 >}} > XTRIM mystream MAXLEN ~ 10 -``` +(integer) 0 +{{< /clients-example >}} However, `XTRIM` is designed to accept different trimming strategies. Another trimming strategy is **MINID**, that evicts entries with IDs lower than the one specified. @@ -726,21 +859,21 @@ So when designing an application using Redis streams and consumer groups, make s Streams also have a special command for removing items from the middle of a stream, just by ID. 
Normally for an append only data structure this may look like an odd feature, but it is actually useful for applications involving, for instance, privacy regulations. The command is called `XDEL` and receives the name of the stream followed by the IDs to delete: -``` -> XRANGE mystream - + COUNT 2 -1) 1) 1526654999635-0 - 2) 1) "value" - 2) "2" -2) 1) 1526655000369-0 - 2) 1) "value" - 2) "3" -> XDEL mystream 1526654999635-0 +{{< clients-example stream_tutorial xdel >}} +> XRANGE race:italy - + COUNT 2 +1) 1) "1692633198206-0" + 2) 1) "rider" + 2) "Wood" +2) 1) "1692633208557-0" + 2) 1) "rider" + 2) "Henshaw" +> XDEL race:italy 1692633208557-0 (integer) 1 -> XRANGE mystream - + COUNT 2 -1) 1) 1526655000369-0 - 2) 1) "value" - 2) "3" -``` +> XRANGE race:italy - + COUNT 2 +1) 1) "1692633198206-0" + 2) 1) "rider" + 2) "Wood" +{{< /clients-example >}} However in the current implementation, memory is not really reclaimed until a macro node is completely empty, so you should not abuse this feature. @@ -790,3 +923,12 @@ A few remarks: * Here we processed up to 10k messages per iteration, this means that the `COUNT` parameter of `XREADGROUP` was set to 10000. This adds a lot of latency but is needed in order to allow the slow consumers to be able to keep with the message flow. So you can expect a real world latency that is a lot smaller. * The system used for this benchmark is very slow compared to today's standards. + + + + +## Learn more + +* The [Redis Streams Tutorial](/docs/data-types/streams-tutorial) explains Redis streams with many examples. +* [Redis Streams Explained](https://www.youtube.com/watch?v=Z8qcpXyMAiA) is an entertaining introduction to streams in Redis. +* [Redis University's RU202](https://university.redis.com/courses/ru202/) is a free, online course dedicated to Redis Streams. 
diff --git a/docs/data-types/strings.md b/docs/data-types/strings.md new file mode 100644 index 0000000000..52348dcf59 --- /dev/null +++ b/docs/data-types/strings.md @@ -0,0 +1,133 @@ +--- +title: "Redis Strings" +linkTitle: "Strings" +weight: 10 +description: > + Introduction to Redis strings +--- + +Redis strings store sequences of bytes, including text, serialized objects, and binary arrays. +As such, strings are the simplest type of value you can associate with +a Redis key. +They're often used for caching, but they support additional functionality that lets you implement counters and perform bitwise operations, too. + +Since Redis keys are strings, when we use the string type as a value too, +we are mapping a string to another string. The string data type is useful +for a number of use cases, like caching HTML fragments or pages. + +{{< clients-example set_tutorial set_get >}} + > SET bike:1 Deimos + OK + > GET bike:1 + "Deimos" +{{< /clients-example >}} + +As you can see using the `SET` and the `GET` commands are the way we set +and retrieve a string value. Note that `SET` will replace any existing value +already stored into the key, in the case that the key already exists, even if +the key is associated with a non-string value. So `SET` performs an assignment. + +Values can be strings (including binary data) of every kind, for instance you +can store a jpeg image inside a value. A value can't be bigger than 512 MB. + +The `SET` command has interesting options, that are provided as additional +arguments. For example, I may ask `SET` to fail if the key already exists, +or the opposite, that it only succeeds if the key already exists: + +{{< clients-example set_tutorial setnx_xx >}} + > set bike:1 bike nx + (nil) + > set bike:1 bike xx + OK +{{< /clients-example >}} + +There are a number of other commands for operating on strings. For example +the `GETSET` command sets a key to a new value, returning the old value as the +result. 
You can use this command, for example, if you have a +system that increments a Redis key using `INCR` +every time your web site receives a new visitor. You may want to collect this +information once every hour, without losing a single increment. +You can `GETSET` the key, assigning it the new value of "0" and reading the +old value back. + +The ability to set or retrieve the value of multiple keys in a single +command is also useful for reduced latency. For this reason there are +the `MSET` and `MGET` commands: + +{{< clients-example set_tutorial mset >}} + > mset bike:1 "Deimos" bike:2 "Ares" bike:3 "Vanth" + OK + > mget bike:1 bike:2 bike:3 + 1) "Deimos" + 2) "Ares" + 3) "Vanth" +{{< /clients-example >}} + +When `MGET` is used, Redis returns an array of values. + +### Strings as counters +Even if strings are the basic values of Redis, there are interesting operations +you can perform with them. For instance, one is atomic increment: + +{{< clients-example set_tutorial incr >}} + > set total_crashes 0 + OK + > incr total_crashes + (integer) 1 + > incrby total_crashes 10 + (integer) 11 +{{< /clients-example >}} + +The `INCR` command parses the string value as an integer, +increments it by one, and finally sets the obtained value as the new value. +There are other similar commands like `INCRBY`, +`DECR` and `DECRBY`. Internally it's +always the same command, acting in a slightly different way. + +What does it mean that INCR is atomic? +That even multiple clients issuing INCR against +the same key will never enter into a race condition. For instance, it will never +happen that client 1 reads "10", client 2 reads "10" at the same time, both +increment to 11, and set the new value to 11. The final value will always be +12 and the read-increment-set operation is performed while all the other +clients are not executing a command at the same time. + + +## Limits + +By default, a single Redis string can be a maximum of 512 MB. 
+ +## Basic commands + +### Getting and setting Strings + +* `SET` stores a string value. +* `SETNX` stores a string value only if the key doesn't already exist. Useful for implementing locks. +* `GET` retrieves a string value. +* `MGET` retrieves multiple string values in a single operation. + +### Managing counters + +* `INCRBY` atomically increments (and decrements when passing a negative number) counters stored at a given key. +* Another command exists for floating point counters: `INCRBYFLOAT`. + +### Bitwise operations + +To perform bitwise operations on a string, see the [bitmaps data type](/docs/data-types/bitmaps) docs. + +See the [complete list of string commands](/commands/?group=string). + +## Performance + +Most string operations are O(1), which means they're highly efficient. +However, be careful with the `SUBSTR`, `GETRANGE`, and `SETRANGE` commands, which can be O(n). +These random-access string commands may cause performance issues when dealing with large strings. + +## Alternatives + +If you're storing structured data as a serialized string, you may also want to consider Redis [hashes](/docs/data-types/hashes) or [JSON](/docs/stack/json). + +## Learn more + +* [Redis Strings Explained](https://www.youtube.com/watch?v=7CUt4yWeRQE) is a short, comprehensive video explainer on Redis strings. +* [Redis University's RU101](https://university.redis.com/courses/ru101/) covers Redis strings in detail. diff --git a/docs/get-started/_index.md b/docs/get-started/_index.md new file mode 100644 index 0000000000..e94be52b99 --- /dev/null +++ b/docs/get-started/_index.md @@ -0,0 +1,20 @@ +--- +title: "Quick starts" +linkTitle: "Quick starts" +hideListLinks: true +weight: 20 +description: > + Redis quick start guides +aliases: + - /docs/getting-started/ +--- + +Redis can be used as a database, cache, streaming engine, message broker, and more. The following quick start guides will show you how to use Redis for the following specific purposes: + +1. 
[Data structure store](/docs/get-started/data-store) +2. [Document database](/docs/get-started/document-database) +3. [Vector database](/docs/get-started/vector-database) + +Please select the guide that aligns best with your specific usage scenario. + +You can find answers to frequently asked questions in the [FAQ](/docs/get-started/faq/). diff --git a/docs/get-started/data-store.md b/docs/get-started/data-store.md new file mode 100644 index 0000000000..7c9455a620 --- /dev/null +++ b/docs/get-started/data-store.md @@ -0,0 +1,88 @@ +--- +title: "Redis as an in-memory data structure store quick start guide" +linkTitle: "Data structure store" +weight: 1 +description: Understand how to use basic Redis data types +--- + +This quick start guide shows you how to: + +1. Get started with Redis +2. Store data under a key in Redis +3. Retrieve data with a key from Redis +4. Scan the keyspace for keys that match a specific pattern + +The examples in this article refer to a simple bicycle inventory. + +## Setup + +The easiest way to get started with Redis is to use Redis Cloud: + +1. Create a [free account](https://redis.com/try-free?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). + + +2. Follow the instructions to create a free database. + +You can alternatively follow the [installation guides](/docs/install/install-stack/) to install Redis on your local machine. + +## Connect + +The first step is to connect to Redis. You can find further details about the connection options in this documentation site's [connection section](/docs/connect). The following example shows how to connect to a Redis server that runs on localhost (`-h 127.0.0.1`) and listens on the default port (`-p 6379`): + +{{< clients-example search_quickstart connect >}} +> redis-cli -h 127.0.0.1 -p 6379 +{{< /clients-example>}} +
+{{% alert title="Tip" color="warning" %}} +You can copy and paste the connection details from the Redis Cloud database configuration page. Here is an example connection string of a Cloud database that is hosted in the AWS region `us-east-1` and listens on port 16379: `redis-16379.c283.us-east-1-4.ec2.cloud.redislabs.com:16379`. The connection string has the format `host:port`. You must also copy and paste the username and password of your Cloud database and then either pass the credentials to your client or use the [AUTH command](/commands/auth/) after the connection is established. +{{% /alert %}} + +## Store and retrieve data + +Redis stands for Remote Dictionary Server. You can use the same data types as in your local programming environment but on the server side within Redis. + +Similar to byte arrays, Redis strings store sequences of bytes, including text, serialized objects, counter values, and binary arrays. The following example shows you how to set and get a string value: + +{{< clients-example set_and_get >}} +SET bike:1 "Process 134" +GET bike:1 +{{< /clients-example >}} + +Hashes are the equivalent of dictionaries (dicts or hash maps). Among other things, you can use hashes to represent plain objects and to store groupings of counters. The following example explains how to set and access field values of an object: + +{{< clients-example hash_tutorial set_get_all >}} +> HSET bike:1 model Deimos brand Ergonom type 'Enduro bikes' price 4972 +(integer) 4 +> HGET bike:1 model +"Deimos" +> HGET bike:1 price +"4972" +> HGETALL bike:1 +1) "model" +2) "Deimos" +3) "brand" +4) "Ergonom" +5) "type" +6) "Enduro bikes" +7) "price" +8) "4972" +{{< /clients-example >}} + +You can get a complete overview of available data types in this documentation site's [data types section](/docs/data-types/). Each data type has commands allowing you to manipulate or retrieve data. The [commands reference](/commands/) provides a sophisticated explanation. 
+ +## Scan the keyspace + +Each item within Redis has a unique key. All items live within the Redis [keyspace](/docs/manual/keyspace/). You can scan the Redis keyspace via the [SCAN command](/commands/scan/). Here is an example that scans for the first 100 keys that have the prefix `bike:`: + +{{< clients-example scan_example >}} +SCAN 0 MATCH "bike:*" COUNT 100 +{{< /clients-example >}} + +[SCAN](/commands/scan/) returns a cursor position, allowing you to scan iteratively for the next batch of keys until you reach the cursor value 0. + +## Next steps + +You can address more use cases with Redis by learning about Redis Stack. Here are two additional quick start guides: + +* [Redis as a document database](/docs/get-started/document-database/) +* [Redis as a vector database](/docs/get-started/vector-database/) \ No newline at end of file diff --git a/docs/get-started/data/bikes.json b/docs/get-started/data/bikes.json new file mode 100644 index 0000000000..f8186ec93c --- /dev/null +++ b/docs/get-started/data/bikes.json @@ -0,0 +1,127 @@ +[ + { + "model": "Jigger", + "brand": "Velorim", + "price": 270, + "type": "Kids bikes", + "specs": { + "material": "aluminium", + "weight": "10" + }, + "description": "Small and powerful, the Jigger is the best ride for the smallest of tikes! This is the tiniest kids’ pedal bike on the market available without a coaster brake, the Jigger is the vehicle of choice for the rare tenacious little rider raring to go. We say rare because this smokin’ little bike is not ideal for a nervous first-time rider, but it’s a true giddy up for a true speedster. The Jigger is a 12 inch lightweight kids bicycle and it will meet your little one’s need for speed. It’s a single speed bike that makes learning to pump pedals simple and intuitive. It even has a handle in the bottom of the saddle so you can easily help your child during training! The Jigger is among the most lightweight children’s bikes on the planet. 
It is designed so that 2-3 year-olds fit comfortably in a molded ride position that allows for efficient riding, balanced handling and agility. The Jigger’s frame design and gears work together so your buddingbiker can stand up out of the seat, stop rapidly, rip over trails and pump tracks. The Jigger’s is amazing on dirt or pavement. Your tike will speed down the bike path in no time. The Jigger will ship with a coaster brake. A freewheel kit is provided at no cost. " + }, + { + "model": "Hillcraft", + "brand": "Bicyk", + "price": 1200, + "type": "Kids Mountain Bikes", + "specs": { + "material": "carbon", + "weight": "11" + }, + "description": "Kids want to ride with as little weight as possible. Especially on an incline! They may be at the age when a 27.5\" wheel bike is just too clumsy coming off a 24\" bike. The Hillcraft 26 is just the solution they need! Imagine 120mm travel. Boost front/rear. You have NOTHING to tweak because it is easy to assemble right out of the box. The Hillcraft 26 is an efficient trail trekking machine. Up or down does not matter - dominate the trails going both down and up with this amazing bike. The name Monarch comes from Monarch trail in Colorado where we love to ride. It’s a highly technical, steep and rocky trail but the rip on the waydown is so fulfilling. Don’t ride the trail on a hardtail! It is so much more fun on the full suspension Hillcraft! Hit your local trail with the Hillcraft Monarch 26 to get to where you want to go. " + }, + { + "model": "Chook air 5", + "brand": "Nord", + "price": 815, + "type": "Kids Mountain Bikes", + "specs": { + "material": "alloy", + "weight": "9.1" + }, + "description": "The Chook Air 5 gives kids aged six years and older a durable and uberlight mountain bike for their first experience on tracks and easy cruising through forests and fields. The lower top tube makes it easy to mount and dismount in any situation, giving your kids greater safety on the trails. 
The Chook Air 5 is the perfect intro to mountain biking." + }, + { + "model": "Eva 291", + "brand": "Eva", + "price": 3400, + "type": "Mountain Bikes", + "specs": { + "material": "carbon", + "weight": "9.1" + }, + "description": "The sister company to Nord, Eva launched in 2005 as the first and only women-dedicated bicycle brand. Designed by women for women, allEva bikes are optimized for the feminine physique using analytics from a body metrics database. If you like 29ers, try the Eva 291. It’s a brand new bike for 2022.. This full-suspension, cross-country ride has been designed for velocity. The 291 has 100mm of front and rear travel, a superlight aluminum frame and fast-rolling 29-inch wheels. Yippee!" + }, + { + "model": "Kahuna", + "brand": "Noka Bikes", + "price": 3200, + "type": "Mountain Bikes", + "specs": { + "material": "alloy", + "weight": "9.8" + }, + "description": "Whether you want to try your hand at XC racing or are looking for a lively trail bike that's just as inspiring on the climbs as it is over rougher ground, the Wilder is one heck of a bike built specifically for short women. Both the frames and components have been tweaked to include a women’s saddle, different bars and unique colourway." + }, + { + "model": "XBN 2.1 Alloy", + "brand": "Breakout", + "price": 810, + "type": "Road Bikes", + "specs": { + "material": "alloy", + "weight": "7.2" + }, + "description": "The XBN 2.1 Alloy is our entry-level road bike – but that’s not to say that it’s a basic machine. With an internal weld aluminium frame, a full carbon fork, and the slick-shifting Claris gears from Shimano’s, this is a bike which doesn’t break the bank and delivers craved performance. The 6061 alloy frame is triple-butted which ensures a lighter weight and smoother ride. And it’s comfortable with dropped seat stays and the carbon fork. 
The carefully crafted 50-34 tooth chainset and 11-32 tooth cassette give an easy-on-the-legs bottom gear for climbing, and the high-quality Vittoria Zaffiro tires balance grip, rolling friction and puncture protection when coasting down the other side. " + }, + { + "model": "WattBike", + "brand": "ScramBikes", + "price": 2300, + "type": "eBikes", + "specs": { + "material": "alloy", + "weight": "15" + }, + "description": "The WattBike is the best e-bike for people who still feel young at heart. It has a Bafang 500 watt geared hub motor that can reach 20 miles per hour on both steep inclines and city streets. The lithium-ion battery, which gets nearly 40 miles per charge, has a lightweight form factor, making it easier for seniors to use. It comes fully assembled (no convoluted instructions!) and includes a sturdy helmet at no cost. The Plush saddle softens over time with use. The included Seatpost, however, is easily adjustable and adds to this bike’s fantastic rating for seniors, as do the hydraulic disc brakes from Tektro. " + }, + { + "model": "Soothe Electric bike", + "brand": "Peaknetic", + "price": 1950, + "type": "eBikes", + "specs": { + "material": "alloy", + "weight": "14.7" + }, + "description": "The Soothe is an everyday electric bike, from the makers of Exercycle bikes, that conveys style while you get around the city. The Soothe lives up to its name by keeping your posture upright and relaxed for the ride ahead, keeping those aches and pains from riding at bay. It includes a low-step frame , our memory foam seat, bump-resistant shocks and conveniently placed thumb throttle. " + }, + { + "model": "Secto", + "brand": "Peaknetic", + "price": 430, + "type": "Commuter bikes", + "specs": { + "material": "aluminium", + "weight": "10.0" + }, + "description": "If you struggle with stiff fingers or a kinked neck or back after a few minutes on the road, this lightweight, aluminum bike alleviates those issues and allows you to enjoy the ride. 
From the ergonomic grips to the lumbar-supporting seat position, the Roll Low-Entry offers incredible comfort. The rear-inclined seat tube facilitates stability by allowing you to put a foot on the ground to balance at a stop, and the low step-over frame makes it accessible for all ability and mobility levels. The saddle is very soft, with a wide back to support your hip joints and a cutout in the center to redistribute that pressure. Rim brakes deliver satisfactory braking control, and the wide tires provide a smooth, stable ride on paved roads and gravel. Rack and fender mounts facilitate setting up the Roll Low-Entry as your preferred commuter, and the BMX-like handlebar offers space for mounting a flashlight, bell, or phone holder." + }, + { + "model": "Summit", + "brand": "nHill", + "price": 1200, + "type": "Mountain Bike", + "specs": { + "material": "alloy", + "weight": "11.3" + }, + "description": "This budget mountain bike from nHill performs well both on bike paths and on the trail. The fork with 100mm of travel absorbs rough terrain. Fat Kenda Booster tires give you grip in corners and on wet trails. The Shimano Tourney drivetrain offered enough gears for finding a comfortable pace to ride uphill, and the Tektro hydraulic disc brakes break smoothly. Whether you want an affordable bike that you can take to work, but also take trail riding on the weekends or you’re just after a stable, comfortable ride for the bike path, the Summit gives a good value for money." + }, + { + "model": "ThrillCycle", + "brand": "BikeShind", + "price": 815, + "type": "Commuter Bikes", + "specs": { + "material": "alloy", + "weight": "12.7" + }, + "description": "An artsy, retro-inspired bicycle that’s as functional as it is pretty: The ThrillCycle steel frame offers a smooth ride. A 9-speed drivetrain has enough gears for coasting in the city, but we wouldn’t suggest taking it to the mountains. 
Fenders protect you from mud, and a rear basket lets you transport groceries, flowers and books. The ThrillCycle comes with a limited lifetime warranty, so this little guy will last you long past graduation." + } + + + + +] diff --git a/docs/getting-started/faq.md b/docs/get-started/faq.md similarity index 95% rename from docs/getting-started/faq.md rename to docs/get-started/faq.md index c9183b14c8..0c8531ac97 100644 --- a/docs/getting-started/faq.md +++ b/docs/get-started/faq.md @@ -63,10 +63,9 @@ and not refreshed on cache misses. ## How can I reduce Redis' overall memory usage? -If you can, use Redis 32 bit instances. Also make good use of small hashes, -lists, sorted sets, and sets of integers, since Redis is able to represent -those data types in the special case of a few elements in a much more compact -way. There is more info in the [Memory Optimization page](/topics/memory-optimization). +A good practice is to consider memory consumption when mapping your logical data model to the physical data model within Redis. These considerations include using specific data types, key patterns, and normalization. + +Beyond data modeling, there is more info in the [Memory Optimization page](/topics/memory-optimization). ## What happens if Redis runs out of memory? @@ -77,7 +76,7 @@ with an error to write commands (but will continue to accept read-only commands). You can also configure Redis to evict keys when the max memory limit -is reached. See the [eviction policy docs] for more information on this. +is reached. See the [eviction policy docs](/docs/manual/eviction/) for more information on this. ## Background saving fails with a fork() error on Linux? 
diff --git a/docs/get-started/img/free-cloud-db.png b/docs/get-started/img/free-cloud-db.png new file mode 100644 index 0000000000..3336f3353a Binary files /dev/null and b/docs/get-started/img/free-cloud-db.png differ diff --git a/docs/getting-started/_index.md b/docs/getting-started/_index.md deleted file mode 100644 index b3c38f3481..0000000000 --- a/docs/getting-started/_index.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: "Getting started with Redis" -linkTitle: "Getting started" -weight: 1 -description: > - How to get up and running with Redis -aliases: - - /docs/getting-started/tutorial ---- - -This is a guide to getting started with Redis. You'll learn how to install, run, and experiment with the Redis server process. - -## Install Redis - -How you install Redis depends on your operating system. See the guide below that best fits your needs: - -* [Install Redis from Source]({{< ref "/docs/getting-started/installation/install-redis-from-source.md" >}}) -* [Install Redis on Linux]({{< ref "/docs/getting-started/installation/install-redis-on-linux.md" >}}) -* [Install Redis on macOS]({{< ref "/docs/getting-started/installation/install-redis-on-mac-os.md" >}}) -* [Install Redis on Windows]({{< ref "/docs/getting-started/installation/install-redis-on-windows.md" >}}) - -Once you have Redis up and running, and can connect using `redis-cli`, you can continue with the steps below. - -## Exploring Redis with the CLI - -External programs talk to Redis using a TCP socket and a Redis specific protocol. This protocol is implemented in the Redis client libraries for the different programming languages. However to make hacking with Redis simpler Redis provides a command line utility that can be used to send commands to Redis. This program is called **redis-cli**. 
- -The first thing to do in order to check if Redis is working properly is sending a **PING** command using redis-cli: - - $ redis-cli ping - PONG - -Running **redis-cli** followed by a command name and its arguments will send this command to the Redis instance running on localhost at port 6379. You can change the host and port used by `redis-cli` - just try the `--help` option to check the usage information. - -Another interesting way to run `redis-cli` is without arguments: the program will start in interactive mode. You can type different commands and see their replies. - - $ redis-cli - redis 127.0.0.1:6379> ping - PONG - redis 127.0.0.1:6379> set mykey somevalue - OK - redis 127.0.0.1:6379> get mykey - "somevalue" - -At this point you are able to talk with Redis. It is the right time to pause a bit with this tutorial and start the [fifteen minutes introduction to Redis data types](https://redis.io/topics/data-types-intro) in order to learn a few Redis commands. Otherwise if you already know a few basic Redis commands you can keep reading. - -Securing Redis -=== - -By default Redis binds to **all the interfaces** and has no authentication at -all. If you use Redis in a very controlled environment, separated from the -external internet and in general from attackers, that's fine. However if an unhardened Redis -is exposed to the internet, it is a big security concern. If you are not 100% sure your environment is secured properly, please check the following steps in order to make Redis more secure, which are enlisted in order of increased security. - -1. Make sure the port Redis uses to listen for connections (by default 6379 and additionally 16379 if you run Redis in cluster mode, plus 26379 for Sentinel) is firewalled, so that it is not possible to contact Redis from the outside world. -2. Use a configuration file where the `bind` directive is set in order to guarantee that Redis listens on only the network interfaces you are using. 
For example only the loopback interface (127.0.0.1) if you are accessing Redis just locally from the same computer, and so forth. -3. Use the `requirepass` option in order to add an additional layer of security so that clients will require to authenticate using the `AUTH` command. -4. Use [spiped](http://www.tarsnap.com/spiped.html) or another SSL tunneling software in order to encrypt traffic between Redis servers and Redis clients if your environment requires encryption. - -Note that a Redis instance exposed to the internet without any security [is very simple to exploit](http://antirez.com/news/96), so make sure you understand the above and apply **at least** a firewall layer. After the firewall is in place, try to connect with `redis-cli` from an external host in order to prove yourself the instance is actually not reachable. - -Using Redis from your application -=== - -Of course using Redis just from the command line interface is not enough as -the goal is to use it from your application. In order to do so you need to -download and install a Redis client library for your programming language. -You'll find a [full list of clients for different languages in this page](https://redis.io/clients). - -For instance if you happen to use the Ruby programming language our best advice -is to use the [Redis-rb](https://github.com/redis/redis-rb) client. -You can install it using the command **gem install redis**. - -These instructions are Ruby specific but actually many library clients for -popular languages look quite similar: you create a Redis object and execute -commands calling methods. 
A short interactive example using Ruby: - - >> require 'rubygems' - => false - >> require 'redis' - => true - >> r = Redis.new - => # - >> r.ping - => "PONG" - >> r.set('foo','bar') - => "OK" - >> r.get('foo') - => "bar" - -Redis persistence -================= - -You can learn [how Redis persistence works on this page](https://redis.io/topics/persistence), however what is important to understand for a quick start is that by default, if you start Redis with the default configuration, Redis will spontaneously save the dataset only from time to time (for instance after at least five minutes if you have at least 100 changes in your data), so if you want your database to persist and be reloaded after a restart make sure to call the **SAVE** command manually every time you want to force a data set snapshot. Otherwise make sure to shutdown the database using the **SHUTDOWN** command: - - $ redis-cli shutdown - -This way Redis will make sure to save the data on disk before quitting. -Reading the [persistence page](https://redis.io/topics/persistence) is strongly suggested in order to better understand how Redis persistence works. - -Installing Redis more properly -============================== - -Running Redis from the command line is fine just to hack a bit or for development. However, at some point you'll have some actual application to run on a real server. For this kind of usage you have two different choices: - -* Run Redis using screen. -* Install Redis in your Linux box in a proper way using an init script, so that after a restart everything will start again properly. - -A proper install using an init script is strongly suggested. -The following instructions can be used to perform a proper installation using the init script shipped with Redis version 2.4 or higher in a Debian or Ubuntu based distribution. - -We assume you already copied **redis-server** and **redis-cli** executables under /usr/local/bin. 
- -* Create a directory in which to store your Redis config files and your data: - - sudo mkdir /etc/redis - sudo mkdir /var/redis - -* Copy the init script that you'll find in the Redis distribution under the **utils** directory into `/etc/init.d`. We suggest calling it with the name of the port where you are running this instance of Redis. For example: - - sudo cp utils/redis_init_script /etc/init.d/redis_6379 - -* Edit the init script. - - sudo vi /etc/init.d/redis_6379 - -Make sure to modify **REDISPORT** accordingly to the port you are using. -Both the pid file path and the configuration file name depend on the port number. - -* Copy the template configuration file you'll find in the root directory of the Redis distribution into `/etc/redis/` using the port number as name, for instance: - - sudo cp redis.conf /etc/redis/6379.conf - -* Create a directory inside `/var/redis` that will work as data and working directory for this Redis instance: - - sudo mkdir /var/redis/6379 - -* Edit the configuration file, making sure to perform the following changes: - * Set **daemonize** to yes (by default it is set to no). - * Set the **pidfile** to `/var/run/redis_6379.pid` (modify the port if needed). - * Change the **port** accordingly. In our example it is not needed as the default port is already 6379. - * Set your preferred **loglevel**. - * Set the **logfile** to `/var/log/redis_6379.log` - * Set the **dir** to `/var/redis/6379` (very important step!) -* Finally add the new Redis init script to all the default runlevels using the following command: - - sudo update-rc.d redis_6379 defaults - -You are done! Now you can try running your instance with: - - sudo /etc/init.d/redis_6379 start - -Make sure that everything is working as expected: - -* Try pinging your instance with redis-cli. -* Do a test save with `redis-cli save` and check that the dump file is correctly stored into `/var/redis/6379/` (you should find a file called `dump.rdb`). 
-* Check that your Redis instance is correctly logging in the log file. -* If it's a new machine where you can try it without problems make sure that after a reboot everything is still working. - -Note: In the above instructions we skipped many Redis configuration parameters that you would like to change, for instance in order to use AOF persistence instead of RDB persistence, or to setup replication, and so forth. -Make sure to read the example [`redis.conf`](https://github.com/redis/redis/blob/6.2/redis.conf) file (that is heavily commented) and the other documentation you can find in this web site for more information. diff --git a/docs/getting-started/installation/_index.md b/docs/getting-started/installation/_index.md deleted file mode 100644 index aa07f2716a..0000000000 --- a/docs/getting-started/installation/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: "Installing Redis" -linkTitle: "Install" -weight: 1 -description: > - Install Redis on Linux, macOS, and Windows ---- diff --git a/docs/install/_index.md b/docs/install/_index.md new file mode 100644 index 0000000000..4685cefe7e --- /dev/null +++ b/docs/install/_index.md @@ -0,0 +1,18 @@ +--- +title: "Install Redis or Redis Stack" +linkTitle: "Install" +weight: 30 +hideListLinks: true +description: How to install your preferred Redis flavor on your target platform +aliases: + - /docs/getting-started +--- + +You can install [Redis](https://redis.io/docs/about/) or [Redis Stack](/docs/about/about-stack) locally on your machine. Redis and Redis Stack are available on Linux, macOS, and Windows. + +Here are the installation instructions: + +* [Install Redis](/docs/install/install-redis) +* [Install Redis Stack](/docs/install/install-stack) + +While you can install Redis (Stack) locally, you might also consider using Redis Cloud by creating a [free account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). 
diff --git a/docs/install/install-redis/_index.md b/docs/install/install-redis/_index.md new file mode 100644 index 0000000000..e316a5565b --- /dev/null +++ b/docs/install/install-redis/_index.md @@ -0,0 +1,166 @@ +--- +title: "Install Redis" +linkTitle: "Install Redis" +weight: 1 +description: > + Install Redis on Linux, macOS, and Windows +aliases: +- /docs/getting-started/installation +- /docs/getting-started/tutorial +--- + +This is an installation guide. You'll learn how to install, run, and experiment with the Redis server process. + +While you can install Redis on any of the platforms listed below, you might also consider using Redis Cloud by creating a [free account](https://redis.com/try-free?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). + +## Install Redis + +How you install Redis depends on your operating system and whether you'd like to install it bundled with Redis Stack and Redis UI. See the guide below that best fits your needs: + +* [Install Redis from Source](/docs/install/install-redis/install-redis-from-source) +* [Install Redis on Linux](/docs/install/install-redis/install-redis-on-linux) +* [Install Redis on macOS](/docs/install/install-redis/install-redis-on-mac-os) +* [Install Redis on Windows](/docs/install/install-redis/install-redis-on-windows) +* [Install Redis with Redis Stack and RedisInsight](/docs/install/install-stack/) + +Refer to [Redis Administration](/docs/management/admin/) for detailed setup tips. + +## Test if you can connect using the CLI + +After you have Redis up and running, you can connect using `redis-cli`. + +External programs talk to Redis using a TCP socket and a Redis specific protocol. This protocol is implemented in the Redis client libraries for the different programming languages. However, to make hacking with Redis simpler, Redis provides a command line utility that can be used to send commands to Redis. This program is called **redis-cli**. 
+ +The first thing to do to check if Redis is working properly is sending a **PING** command using redis-cli: + +``` +$ redis-cli ping +PONG +``` + +Running **redis-cli** followed by a command name and its arguments will send this command to the Redis instance running on localhost at port 6379. You can change the host and port used by `redis-cli` - just try the `--help` option to check the usage information. + +Another interesting way to run `redis-cli` is without arguments: the program will start in interactive mode. You can type different commands and see their replies. + +``` +$ redis-cli +redis 127.0.0.1:6379> ping +PONG +``` + +## Securing Redis + +By default Redis binds to **all the interfaces** and has no authentication at all. If you use Redis in a very controlled environment, separated from the external internet and in general from attackers, that's fine. However, if an unhardened Redis is exposed to the internet, it is a big security concern. If you are not 100% sure your environment is secured properly, please check the following steps in order to make Redis more secure: + +1. Make sure the port Redis uses to listen for connections (by default 6379 and additionally 16379 if you run Redis in cluster mode, plus 26379 for Sentinel) is firewalled, so that it is not possible to contact Redis from the outside world. +2. Use a configuration file where the `bind` directive is set in order to guarantee that Redis listens on only the network interfaces you are using. For example, only the loopback interface (127.0.0.1) if you are accessing Redis locally from the same computer. +3. Use the `requirepass` option to add an additional layer of security so that clients will be required to authenticate using the `AUTH` command. +4. Use [spiped](http://www.tarsnap.com/spiped.html) or another SSL tunneling software to encrypt traffic between Redis servers and Redis clients if your environment requires encryption. 
+ +Note that a Redis instance exposed to the internet without any security [is very simple to exploit](http://antirez.com/news/96), so make sure you understand the above and apply **at least** a firewall layer. After the firewall is in place, try to connect with `redis-cli` from an external host to confirm that the instance is not reachable. + +## Use Redis from your application + +Of course using Redis just from the command line interface is not enough as the goal is to use it from your application. To do so, you need to download and install a Redis client library for your programming language. + +You'll find a [full list of clients for different languages in this page](/clients). + + +## Redis persistence + +You can learn [how Redis persistence works on this page](/docs/management/persistence/). It is important to understand that, if you start Redis with the default configuration, Redis will spontaneously save the dataset only from time to time. For example, after at least five minutes if you have at least 100 changes in your data. If you want your database to persist and be reloaded after a restart make sure to call the **SAVE** command manually every time you want to force a data set snapshot. Alternatively, you can save the data on disk before quitting by using the **SHUTDOWN** command: + +``` +$ redis-cli shutdown +``` + +This way, Redis will save the data on disk before quitting. Reading the [persistence page](/docs/management/persistence/) is strongly suggested to better understand how Redis persistence works. + +## Install Redis properly + +Running Redis from the command line is fine just to hack a bit or for development. However, at some point you'll have some actual application to run on a real server. For this kind of usage you have two different choices: + +* Run Redis using screen. +* Install Redis in your Linux box in a proper way using an init script, so that after a restart everything will start again properly. 
+ +A proper install using an init script is strongly recommended. + +{{% alert title="Note" color="warning" %}} +The available packages for supported Linux distributions already include the capability of starting the Redis server from `/etc/init`. +{{% /alert %}} + +{{% alert title="Note" color="warning" %}} +The remainder of this section assumes you've [installed Redis from its source code](/docs/install/install-redis/install-redis-from-source). If instead you have installed Redis Stack, you will need to download a [basic init script](https://raw.githubusercontent.com/redis/redis/7.2/utils/redis_init_script) and then modify both it and the following instructions to conform to the way Redis Stack was installed on your platform. For example, on Ubuntu 20.04 LTS, Redis Stack is installed in `/opt/redis-stack`, not `/usr/local`, so you'll need to adjust accordingly. +{{% /alert %}} + +The following instructions can be used to perform a proper installation using the init script shipped with the Redis source code, `/path/to/redis-stable/utils/redis_init_script`. + +If you have not yet run `make install` after building the Redis source, you will need to do so before continuing. By default, `make install` will copy the `redis-server` and `redis-cli` binaries to `/usr/local/bin`. + +* Create a directory in which to store your Redis config files and your data: + + ``` + sudo mkdir /etc/redis + sudo mkdir /var/redis + ``` + +* Copy the init script that you'll find in the Redis distribution under the **utils** directory into `/etc/init.d`. We suggest calling it with the name of the port where you are running this instance of Redis. Make sure the resulting file has `0755` permissions. + + ``` + sudo cp utils/redis_init_script /etc/init.d/redis_6379 + ``` + +* Edit the init script. + + ``` + sudo vi /etc/init.d/redis_6379 + ``` + +Make sure to set the **REDISPORT** variable to the port you are using. 
+Both the pid file path and the configuration file name depend on the port number. + +* Copy the template configuration file you'll find in the root directory of the Redis distribution into `/etc/redis/` using the port number as the name, for instance: + + ``` + sudo cp redis.conf /etc/redis/6379.conf + ``` + +* Create a directory inside `/var/redis` that will work as both data and working directory for this Redis instance: + + ``` + sudo mkdir /var/redis/6379 + ``` + +* Edit the configuration file, making sure to perform the following changes: + * Set **daemonize** to yes (by default it is set to no). + * Set the **pidfile** to `/var/run/redis_6379.pid`, modifying the port as necessary. + * Change the **port** accordingly. In our example it is not needed as the default port is already `6379`. + * Set your preferred **loglevel**. + * Set the **logfile** to `/var/log/redis_6379.log`. + * Set the **dir** to `/var/redis/6379` (very important step!). +* Finally, add the new Redis init script to all the default runlevels using the following command: + + ``` + sudo update-rc.d redis_6379 defaults + ``` + +You are done! Now you can try running your instance with: + +``` +sudo /etc/init.d/redis_6379 start +``` + +Make sure that everything is working as expected: + +1. Try pinging your instance within a `redis-cli` session using the `PING` command. +2. Do a test save with `redis-cli save` and check that a dump file is correctly saved to `/var/redis/6379/dump.rdb`. +3. Check that your Redis instance is logging to the `/var/log/redis_6379.log` file. +4. If it's a new machine where you can try it without problems, make sure that after a reboot everything is still working. + +{{% alert title="Note" color="warning" %}} +The above instructions don't include all of the Redis configuration parameters that you could change. For example, to use AOF persistence instead of RDB persistence, or to set up replication, and so forth. 
+{{% /alert %}} + +You should also read the example [redis.conf](/docs/management/config-file/) file, which is heavily annotated to help guide you on making changes. Further details can also be found in the [configuration article on this site](/docs/management/config/). + +
diff --git a/docs/getting-started/installation/install-redis-from-source.md b/docs/install/install-redis/install-redis-from-source.md similarity index 62% rename from docs/getting-started/installation/install-redis-from-source.md rename to docs/install/install-redis/install-redis-from-source.md index 769d84b4ca..f910965350 100644 --- a/docs/getting-started/installation/install-redis-from-source.md +++ b/docs/install/install-redis/install-redis-from-source.md @@ -1,16 +1,18 @@ --- title: "Install Redis from Source" -linkTitle: "Install from Source" +linkTitle: "Source code" weight: 5 description: > Compile and install Redis from source +aliases: +- /docs/getting-started/installation/install-redis-from-source --- You can compile and install Redis from source on variety of platforms and operating systems including Linux and macOS. Redis has no dependencies other than a C compiler and `libc`. ## Downloading the source files -The Redis source files are available on [this site's Download page]. You can verify the integrity of these downloads by checking them against the digests in the [redis-hashes git repository](https://github.com/redis/redis-hashes). +The Redis source files are available from the [Download](/download) page. You can verify the integrity of these downloads by checking them against the digests in the [redis-hashes git repository](https://github.com/redis/redis-hashes). 
To obtain the source files for the latest stable version of Redis from the Redis downloads site, run: @@ -20,7 +22,7 @@ wget https://download.redis.io/redis-stable.tar.gz ## Compiling Redis -To compile Redis, first the tarball, change to the root directory, and then run `make`: +To compile Redis, first extract the tarball, change to the root directory, and then run `make`: {{< highlight bash >}} tar -xzvf redis-stable.tar.gz @@ -28,6 +30,12 @@ cd redis-stable make {{< / highlight >}} +To build with TLS support, you'll need to install OpenSSL development libraries (e.g., libssl-dev on Debian/Ubuntu) and then run: + +{{< highlight bash >}} +make BUILD_TLS=yes +{{< / highlight >}} + If the compile succeeds, you'll find several Redis binaries in the `src` directory, including: * **redis-server**: the Redis Server itself @@ -36,7 +44,7 @@ If the compile succeeds, you'll find several Redis binaries in the `src` directo To install these binaries in `/usr/local/bin`, run: {{< highlight bash >}} -make install +sudo make install {{< / highlight >}} ### Starting and stopping Redis in the foreground @@ -50,3 +58,5 @@ redis-server If successful, you'll see the startup logs for Redis, and Redis will be running in the foreground. To stop Redis, enter `Ctrl-C`. + +For a more complete installation, continue with [these instructions](/docs/install/#install-redis-more-properly). 
diff --git a/docs/getting-started/installation/install-redis-on-linux.md b/docs/install/install-redis/install-redis-on-linux.md similarity index 60% rename from docs/getting-started/installation/install-redis-on-linux.md rename to docs/install/install-redis/install-redis-on-linux.md index 95f1c07e7b..2e47efe945 100644 --- a/docs/getting-started/installation/install-redis-on-linux.md +++ b/docs/install/install-redis/install-redis-on-linux.md @@ -1,9 +1,11 @@ --- title: "Install Redis on Linux" -linkTitle: "Install on Linux" +linkTitle: "Linux" weight: 1 description: > - How to install Redis on Ubuntu, RHEL, and CentOS + How to install Redis on Linux +aliases: +- /docs/getting-started/installation/install-redis-on-linux --- Most major Linux distributions provide packages for Redis. @@ -11,6 +13,15 @@ Most major Linux distributions provide packages for Redis. ## Install on Ubuntu/Debian You can install recent stable versions of Redis from the official `packages.redis.io` APT repository. + +{{% alert title="Prerequisites" color="warning" %}} +If you're running a very minimal distribution (such as a Docker container) you may need to install `lsb-release`, `curl` and `gpg` first: + +{{< highlight bash >}} +sudo apt install lsb-release curl gpg +{{< / highlight >}} +{{% /alert %}} + Add the repository to the apt index, update it, and then install: {{< highlight bash >}} @@ -25,6 +36,7 @@ sudo apt-get install redis ## Install from Snapcraft The [Snapcraft store](https://snapcraft.io/store) provides [Redis packages](https://snapcraft.io/redis) that can be installed on platforms that support snap. +Snap is supported and available on most major Linux distributions. To install via snap, run: @@ -32,4 +44,4 @@ To install via snap, run: sudo snap install redis {{< / highlight >}} -If your Linux does not currently have snap installed, you may install it by following the instructions described in [Installing snapd](https://snapcraft.io/docs/installing-snapd). 
+If your Linux does not currently have snap installed, install it using the instructions described in [Installing snapd](https://snapcraft.io/docs/installing-snapd). diff --git a/docs/getting-started/installation/install-redis-on-mac-os.md b/docs/install/install-redis/install-redis-on-mac-os.md similarity index 92% rename from docs/getting-started/installation/install-redis-on-mac-os.md rename to docs/install/install-redis/install-redis-on-mac-os.md index 71d9ca2c92..f2bd70cfab 100644 --- a/docs/getting-started/installation/install-redis-on-mac-os.md +++ b/docs/install/install-redis/install-redis-on-mac-os.md @@ -1,18 +1,20 @@ --- title: "Install Redis on macOS" -linkTitle: "Install on macOS" +linkTitle: "MacOS" weight: 1 description: Use Homebrew to install and start Redis on macOS +aliases: +- /docs/getting-started/installation/install-redis-on-mac-os --- -This guide shows you how to install Redis on macOS using Homebrew. Homebrew is the easiest way to install Redis on macOS. If you'd prefer to build Redis from the source files on macOS, see [Installing Redis from Source]. +This guide shows you how to install Redis on macOS using Homebrew. Homebrew is the easiest way to install Redis on macOS. If you'd prefer to build Redis from the source files on macOS, see [Installing Redis from Source](/docs/install/install-redis/install-redis-from-source). ## Prerequisites First, make sure you have Homebrew installed. From the terminal, run: {{< highlight bash >}} -$ brew --version +brew --version {{< / highlight >}} If this command fails, you'll need to [follow the Homebrew installation instructions](https://brew.sh/). 
diff --git a/docs/getting-started/installation/install-redis-on-windows.md b/docs/install/install-redis/install-redis-on-windows.md similarity index 80% rename from docs/getting-started/installation/install-redis-on-windows.md rename to docs/install/install-redis/install-redis-on-windows.md index ef8495cfa3..087f3ba119 100644 --- a/docs/getting-started/installation/install-redis-on-windows.md +++ b/docs/install/install-redis/install-redis-on-windows.md @@ -1,11 +1,13 @@ --- title: "Install Redis on Windows" -linkTitle: "Install on Windows" +linkTitle: "Windows" weight: 1 description: Use Redis on Windows for development +aliases: +- /docs/getting-started/installation/install-redis-on-windows/ --- -Redis is not officially supported on Windows. However, you can install Redis on Windows for development by the following the instructions below. +Redis is not officially supported on Windows. However, you can install Redis on Windows for development by following the instructions below. To install Redis on Windows, you'll first need to enable [WSL2](https://docs.microsoft.com/en-us/windows/wsl/install) (Windows Subsystem for Linux). WSL2 lets you run Linux binaries natively on Windows. For this method to work, you'll need to be running Windows 10 version 2004 and higher or Windows 11. @@ -15,7 +17,7 @@ Microsoft provides [detailed instructions for installing WSL](https://docs.micro ## Install Redis -Once you're running Ubuntu on Windows, you can follow the steps detailed at [Install on Ubuntu/Debian](install-redis-on-linux#install-on-ubuntu-debian) to install recent stable versions of Redis from the official `packages.redis.io` APT repository. +Once you're running Ubuntu on Windows, you can follow the steps detailed at [Install on Ubuntu/Debian](/docs/install/install-redis/install-redis-on-linux#install-on-ubuntu-debian) to install recent stable versions of Redis from the official `packages.redis.io` APT repository. 
Add the repository to the apt index, update it, and then install: {{< highlight bash >}} diff --git a/docs/install/install-redisinsight/_index.md b/docs/install/install-redisinsight/_index.md new file mode 100644 index 0000000000..a45a4defcf --- /dev/null +++ b/docs/install/install-redisinsight/_index.md @@ -0,0 +1,9 @@ +--- +title: "Install RedisInsight" +linkTitle: "Install RedisInsight" +weight: 3 +description: > + Install RedisInsight on AWS, Docker, and Kubernetes +--- + +This is an installation guide. You'll learn how to install RedisInsight on Amazon Web Services (AWS), Docker, and Kubernetes. \ No newline at end of file diff --git a/docs/install/install-redisinsight/env-variables.md b/docs/install/install-redisinsight/env-variables.md new file mode 100644 index 0000000000..dbf115bcff --- /dev/null +++ b/docs/install/install-redisinsight/env-variables.md @@ -0,0 +1,19 @@ +--- +title: "Environment variables" +linkTitle: "Environment variables" +weight: 1 +description: > + RedisInsight supported environment variables +--- +You can configure RedisInsight with the following environment variables. + +| Variable | Purpose | Default | Additional info | +| --- | --- | --- | --- | +| RI_APP_PORT | The port that RedisInsight listens on |
  • Docker: 5540
  • desktop: 5530
| See [Express Documentation](https://expressjs.com/en/api.html#app.listen)| +| RI_APP_HOST | The host that RedisInsight listens on |
  • Docker: 0.0.0.0
  • desktop: 127.0.0.1
| See [Express Documentation](https://expressjs.com/en/api.html#app.listen)| +| RI_SERVER_TLS_KEY | Private key for HTTPS | n/a | Private key in [PEM format](https://www.ssl.com/guide/pem-der-crt-and-cer-x-509-encodings-and-conversions/#ftoc-heading-3). Can be a path to a file or a string in PEM format.| +| RI_SERVER_TLS_CERT | Certificate for supplied private key | n/a | Public certificate in [PEM format](https://www.ssl.com/guide/pem-der-crt-and-cer-x-509-encodings-and-conversions/#ftoc-heading-3). Can be a path to a file or a string in PEM format.| +| RI_ENCRYPTION_KEY | Key to encrypt data with | n/a | Available only for Docker.
RedisInsight stores sensitive information (database passwords, Workbench history, etc.) locally (using [sqlite3](https://github.com/TryGhost/node-sqlite3)). This variable allows you to store sensitive information encrypted using the specified encryption key.
Note: The same encryption key should be provided for subsequent `docker run` commands with the same volume attached to decrypt the information. | +| RI_LOG_LEVEL | Configures the log level of the application. | `info` | Supported logging levels are prioritized from highest to lowest:
  • error
  • warn
  • info
  • http
  • verbose
  • debug
  • silly
| +| RI_FILES_LOGGER | Log to file | `true` | By default, you can find log files in the following folders:
  • Docker: `/data/logs`
  • desktop: `/.redisinsight-app/logs`
| +| RI_STDOUT_LOGGER | Log to STDOUT | `true` | | diff --git a/docs/install/install-redisinsight/install-on-aws.md b/docs/install/install-redisinsight/install-on-aws.md new file mode 100644 index 0000000000..93fe59299e --- /dev/null +++ b/docs/install/install-redisinsight/install-on-aws.md @@ -0,0 +1,92 @@ +--- +title: "Install on AWS EC2" +linkTitle: "Install on AWS EC2" +weight: 3 +description: > + How to install RedisInsight on AWS EC2 +--- +This tutorial shows you how to install RedisInsight on an AWS EC2 instance and manage ElastiCache Redis instances using RedisInsight. To complete this tutorial you must have access to the AWS Console and permissions to launch EC2 instances. + +Step 1: Create a new IAM Role (optional) +-------------- + +RedisInsight needs read-only access to S3 and ElastiCache APIs. This is an optional step. + +1. Log in to the AWS Console and navigate to the IAM screen. +1. Create a new IAM Role. +1. Under *Select type of trusted entity*, choose EC2. The role is used by an EC2 instance. +1. Assign the following permissions: + * AmazonS3ReadOnlyAccess + * AmazonElastiCacheReadOnlyAccess + +Step 2: Launch EC2 Instance +-------------- + +Next, launch an EC2 instance. + +1. Navigate to EC2 under AWS Console. +1. Click Launch Instance. +1. Choose 64-bit Amazon Linux AMI. +1. Choose at least a t2.medium instance. The size of the instance depends on the memory used by your ElastiCache instance that you want to analyze. +1. Under Configure Instance: + * Choose the VPC that has your ElastiCache instances. + * Choose a subnet that has network access to your ElastiCache instances. + * Ensure that your EC2 instance has a public IP Address. + * Assign the IAM role that you created in Step 1. +1. Under the storage section, allocate at least 100 GiB storage. +1. Under security group, ensure that: + * Incoming traffic is allowed on port 5540 + * Incoming traffic is allowed on port 22 only during installation +1. Review and launch the ec2 instance. 
+ +Step 3: Verify permissions and connectivity +---------- + +Next, verify that the EC2 instance has the required IAM permissions and can connect to ElastiCache Redis instances. + +1. SSH into the newly launched EC2 instance. +1. Open a command prompt. +1. Run the command `aws s3 ls`. This should list all S3 buckets. + 1. If the `aws` command cannot be found, make sure your EC2 instance is based of Amazon Linux. +1. Next, find the hostname of the ElastiCache instance you want to analyze and run the command `echo info | nc 6379`. +1. If you see some details about the ElastiCache Redis instance, you can proceed to the next step. +1. If you cannot connect to redis, you should review your VPC, subnet, and security group settings. + +Step 4: Install Docker on EC2 +------- + +Next, install Docker on the EC2 instance. Run the following commands: + +1. `sudo yum update -y` +1. `sudo yum install -y docker` +1. `sudo service docker start` +1. `sudo usermod -a -G docker ec2-user` +1. Log out and log back in again to pick up the new docker group permissions. +1. To verify, run `docker ps`. You should see some output without having to run `sudo`. + +Step 5: Run RedisInsight in the Docker container +------- + +Finally, install RedisInsight using one of the options described below. + +1. If you do not want to persist your RedisInsight data: + +```bash +docker run -d --name redisinsight -p 5540:5540 redis/redisinsight:latest +``` +2. If you want to persist your RedisInsight data, first attach the Docker volume to the `/data` path and then run the following command: + +```bash +docker run -d --name redisinsight -p 5540:5540 redis/redisinsight:latest -v redisinsight:/data +``` + +If the previous command returns a permission error, ensure that the user with `ID = 1000` has the necessary permission to access the volume provided (`redisinsight` in the command above). + +Find the IP Address of your EC2 instances and launch your browser at `http://:5540`. 
Accept the EULA and start using RedisInsight. + +RedisInsight also provides a health check endpoint at `http://:5540/api/health/` to monitor the health of the running container. + +Summary +------ + +In this guide, we installed RedisInsight on an AWS EC2 instance running Docker. As a next step, you should add an ElastiCache Redis Instance and then run the memory analysis. diff --git a/docs/install/install-redisinsight/install-on-docker.md b/docs/install/install-redisinsight/install-on-docker.md new file mode 100644 index 0000000000..c73a15ea97 --- /dev/null +++ b/docs/install/install-redisinsight/install-on-docker.md @@ -0,0 +1,34 @@ +--- +title: "Install on Docker" +linkTitle: "Install on Docker" +weight: 2 +description: > + How to install RedisInsight on Docker +--- +This tutorial shows how to install RedisInsight on [Docker](https://www.docker.com/) so you can use RedisInsight in development. +See a separate guide for installing [RedisInsight on AWS](/docs/install/install-redisinsight/install-on-aws/). + +## Install Docker + +The first step is to [install Docker for your operating system](https://docs.docker.com/install/). + +## Run RedisInsight Docker image + +You can install RedisInsight using one of the options described below. + +1. If you do not want to persist your RedisInsight data: + +```bash +docker run -d --name redisinsight -p 5540:5540 redis/redisinsight:latest +``` +2. If you want to persist your RedisInsight data, first attach the Docker volume to the `/data` path and then run the following command: + +```bash +docker run -d --name redisinsight -p 5540:5540 redis/redisinsight:latest -v redisinsight:/data +``` + +If the previous command returns a permission error, ensure that the user with `ID = 1000` has the necessary permissions to access the volume provided (`redisinsight` in the command above). + +Next, point your browser to `http://localhost:5540`. 
+ +RedisInsight also provides a health check endpoint at `http://localhost:5540/api/health/` to monitor the health of the running container. diff --git a/docs/install/install-redisinsight/install-on-k8s.md b/docs/install/install-redisinsight/install-on-k8s.md new file mode 100644 index 0000000000..8a42865fec --- /dev/null +++ b/docs/install/install-redisinsight/install-on-k8s.md @@ -0,0 +1,267 @@ +--- +title: "Install on Kubernetes" +linkTitle: "Install on Kubernetes" +weight: 4 +description: > + How to install RedisInsight on Kubernetes +--- +This tutorial shows how to install RedisInsight on [Kubernetes](https://kubernetes.io/) (K8s). +This is an easy way to use RedisInsight with a [Redis Enterprise K8s deployment](https://redis.io/docs/about/redis-enterprise/#:~:text=and%20Multi%2Dcloud-,Redis%20Enterprise%20Software,-Redis%20Enterprise%20Software). + +## Create the RedisInsight deployment and service + +Below is an annotated YAML file that will create a RedisInsight +deployment and a service in a K8s cluster. + +1. Create a new file named `redisinsight.yaml` with the content below. 
+ +```yaml +# RedisInsight service with name 'redisinsight-service' +apiVersion: v1 +kind: Service +metadata: + name: redisinsight-service # name should not be 'redisinsight' + # since the service creates + # environment variables that + # conflicts with redisinsight + # application's environment + # variables `RI_APP_HOST` and + # `RI_APP_PORT` +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 5540 + selector: + app: redisinsight +--- +# RedisInsight deployment with name 'redisinsight' +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redisinsight #deployment name + labels: + app: redisinsight #deployment label +spec: + replicas: 1 #a single replica pod + selector: + matchLabels: + app: redisinsight #which pods is the deployment managing, as defined by the pod template + template: #pod template + metadata: + labels: + app: redisinsight #label for pod/s + spec: + containers: + + - name: redisinsight #Container name (DNS_LABEL, unique) + image: redis/redisinsight:latest #repo/image + imagePullPolicy: IfNotPresent #Installs the latest RedisInsight version + volumeMounts: + - name: redisinsight #Pod volumes to mount into the container's filesystem. Cannot be updated. + mountPath: /data + ports: + - containerPort: 5540 #exposed container port and protocol + protocol: TCP + volumes: + - name: redisinsight + emptyDir: {} # node-ephemeral volume https://kubernetes.io/docs/concepts/storage/volumes/#emptydir +``` + +2. Create the RedisInsight deployment and service: + +```sh +kubectl apply -f redisinsight.yaml +``` + +3. Once the deployment and service are successfully applied and complete, access RedisInsight. This can be accomplished by using the `` of the service we created to reach RedisInsight. + +```sh +$ kubectl get svc redisinsight-service +NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE +redisinsight-service 80:32143/TCP 1m +``` + +4. If you are using minikube, run `minikube list` to list the service and access RedisInsight at `http://:`. 
+``` +$ minikube list +|-------------|----------------------|--------------|---------------------------------------------| +| NAMESPACE   | NAME                 | TARGET PORT  | URL                                         | +|-------------|----------------------|--------------|---------------------------------------------| +| default     | kubernetes           | No node port |                                             | +| default     | redisinsight-service | 80           | http://:                                    | +| kube-system | kube-dns             | No node port |                                             | +|-------------|----------------------|--------------|---------------------------------------------| +``` + +## Create the RedisInsight deployment with persistent storage + +Below is an annotated YAML file that will create a RedisInsight +deployment in a K8s cluster. It will assign a persistent volume created from a volume claim template. +Write access to the container is configured in an init container. When using deployments +with persistent writeable volumes, it's best to set the strategy to `Recreate`. Otherwise you may find yourself +with two pods trying to use the same volume. + +1. Create a new file `redisinsight.yaml` with the content below.
+ +```yaml +# RedisInsight service with name 'redisinsight-service' +apiVersion: v1 +kind: Service +metadata: + name: redisinsight-service # name should not be 'redisinsight' + # since the service creates + # environment variables that + # conflicts with redisinsight + # application's environment + # variables `RI_APP_HOST` and + # `RI_APP_PORT` +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 5540 + selector: + app: redisinsight +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: redisinsight-pv-claim + labels: + app: redisinsight +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: default +--- +# RedisInsight deployment with name 'redisinsight' +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redisinsight #deployment name + labels: + app: redisinsight #deployment label +spec: + replicas: 1 #a single replica pod + strategy: + type: Recreate + selector: + matchLabels: + app: redisinsight #which pods is the deployment managing, as defined by the pod template + template: #pod template + metadata: + labels: + app: redisinsight #label for pod/s + spec: + volumes: + - name: redisinsight + persistentVolumeClaim: + claimName: redisinsight-pv-claim + initContainers: + - name: init + image: busybox + command: + - /bin/sh + - '-c' + - | + chown -R 1001 /data + resources: {} + volumeMounts: + - name: redisinsight + mountPath: /data + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + containers: + - name: redisinsight #Container name (DNS_LABEL, unique) + image: redis/redisinsight:latest #repo/image + imagePullPolicy: IfNotPresent #Always pull image + volumeMounts: + - name: redisinsight #Pod volumes to mount into the container's filesystem. Cannot be updated. + mountPath: /data + ports: + - containerPort: 5540 #exposed container port and protocol + protocol: TCP +``` + +2. Create the RedisInsight deployment and service. 
+ +```sh +kubectl apply -f redisinsight.yaml +``` + +## Create the RedisInsight deployment without a service. + +Below is an annotated YAML file that will create a RedisInsight +deployment in a K8s cluster. + +1. Create a new file redisinsight.yaml with the content below + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redisinsight #deployment name + labels: + app: redisinsight #deployment label +spec: + replicas: 1 #a single replica pod + selector: + matchLabels: + app: redisinsight #which pods is the deployment managing, as defined by the pod template + template: #pod template + metadata: + labels: + app: redisinsight #label for pod/s + spec: + containers: + - name: redisinsight #Container name (DNS_LABEL, unique) + image: redis/redisinsight:latest #repo/image + imagePullPolicy: IfNotPresent #Always pull image + env: + # If there's a service named 'redisinsight' that exposes the + # deployment, we manually set `RI_APP_HOST` and + # `RI_APP_PORT` to override the service environment + # variables. + - name: RI_APP_HOST + value: "0.0.0.0" + - name: RI_APP_PORT + value: "5540" + volumeMounts: + - name: redisinsight #Pod volumes to mount into the container's filesystem. Cannot be updated. + mountPath: /data + ports: + - containerPort: 5540 #exposed container port and protocol + protocol: TCP + livenessProbe: + httpGet: + path : /healthcheck/ # exposed RI endpoint for healthcheck + port: 5540 # exposed container port + initialDelaySeconds: 5 # number of seconds to wait after the container starts to perform liveness probe + periodSeconds: 5 # period in seconds after which liveness probe is performed + failureThreshold: 1 # number of liveness probe failures after which container restarts + volumes: + - name: redisinsight + emptyDir: {} # node-ephemeral volume https://kubernetes.io/docs/concepts/storage/volumes/#emptydir +``` + +2. 
Create the RedisInsight deployment + +```sh +kubectl apply -f redisinsight.yaml +``` + +{{< alert title="Note" >}} +If the deployment will be exposed by a service whose name is 'redisinsight', set `RI_APP_HOST` and `RI_APP_PORT` environment variables to override the environment variables created by the service. +{{< /alert >}} + +3. Once the deployment has been successfully applied and the deployment is complete, access RedisInsight. This can be accomplished by exposing the deployment as a K8s Service or by using port forwarding, as in the example below: + +```sh +kubectl port-forward deployment/redisinsight 5540 +``` + +Open your browser and point to diff --git a/docs/interact/_index.md b/docs/interact/_index.md new file mode 100644 index 0000000000..a69055546b --- /dev/null +++ b/docs/interact/_index.md @@ -0,0 +1,9 @@ +--- +title: "Interact with data in Redis" +linkTitle: "Interact with data" + +weight: 40 + +description: > + How to interact with data in Redis, including searching, querying, triggered functions, transactions, and pub/sub. +--- \ No newline at end of file diff --git a/docs/manual/programmability/_index.md b/docs/interact/programmability/_index.md similarity index 99% rename from docs/manual/programmability/_index.md rename to docs/interact/programmability/_index.md index 48b2ca1c91..14ac7489c3 100644 --- a/docs/manual/programmability/_index.md +++ b/docs/interact/programmability/_index.md @@ -1,11 +1,12 @@ --- title: "Redis programmability" linkTitle: "Programmability" -weight: 1 +weight: 20 description: > Extending Redis with Lua and Redis Functions aliases: - /topics/programmability + - /docs/manual/programmability/ --- Redis provides a programming interface that lets you execute custom scripts on the server itself. In Redis 7 and beyond, you can use [Redis Functions](/docs/manual/programmability/functions-intro) to manage and run your scripts. 
In Redis 6.2 and below, you use [Lua scripting with the EVAL command](/docs/manual/programmability/eval-intro) to program the server. diff --git a/docs/manual/programmability/eval-intro.md b/docs/interact/programmability/eval-intro.md similarity index 99% rename from docs/manual/programmability/eval-intro.md rename to docs/interact/programmability/eval-intro.md index 28678360eb..04776e3dbf 100644 --- a/docs/manual/programmability/eval-intro.md +++ b/docs/interact/programmability/eval-intro.md @@ -6,6 +6,7 @@ description: > Executing Lua in Redis aliases: - /topics/eval-intro + - /docs/manual/programmability/eval-intro/ --- Redis lets users upload and execute Lua scripts on the server. @@ -21,7 +22,7 @@ These include: * Providing locality by executing logic where data lives. Data locality reduces overall latency and saves networking resources. * Blocking semantics that ensure the script's atomic execution. -* Enabling the composition of simple capabilities that are either missing from Redis or are too niche to a part of it. +* Enabling the composition of simple capabilities that are either missing from Redis or are too niche to be a part of it. Lua lets you run part of your application logic inside Redis. Such scripts can perform conditional updates across multiple keys, possibly combining several different data types atomically. @@ -179,7 +180,7 @@ In this case, the application should first load it with `SCRIPT LOAD` and then c Most of [Redis' clients](/clients) already provide utility APIs for doing that automatically. Please consult your client's documentation regarding the specific details. -### `EVALSHA` in the context of pipelining +### `!EVALSHA` in the context of pipelining Special care should be given executing `EVALSHA` in the context of a [pipelined request](/topics/pipelining). The commands in a pipelined request run in the order they are sent, but other clients' commands may be interleaved for execution between these. 
@@ -206,7 +207,7 @@ However, from the point of view of the Redis client, there are only two ways to Practically speaking, it is much simpler for the client to assume that in the context of a given connection, cached scripts are guaranteed to be there unless the administrator explicitly invoked the `SCRIPT FLUSH` command. The fact that the user can count on Redis to retain cached scripts is semantically helpful in the context of pipelining. -## The `SCRIPT` command +## The `!SCRIPT` command The Redis `SCRIPT` provides several ways for controlling the scripting subsystem. These are: diff --git a/docs/manual/programmability/functions-intro.md b/docs/interact/programmability/functions-intro.md similarity index 98% rename from docs/manual/programmability/functions-intro.md rename to docs/interact/programmability/functions-intro.md index 13db8877c9..7d7d8e2543 100644 --- a/docs/manual/programmability/functions-intro.md +++ b/docs/interact/programmability/functions-intro.md @@ -6,6 +6,7 @@ description: > Scripting with Redis 7 and beyond aliases: - /topics/functions-intro + - /docs/manual/programmability/functions-intro/ --- Redis Functions is an API for managing code to be executed on the server. This feature, which became available in Redis 7, supersedes the use of [EVAL](/docs/manual/programmability/eval-intro) in prior versions of Redis. @@ -228,7 +229,7 @@ end local function my_hlastmodified(keys, args) local hash = keys[1] - return redis.call('HGET', keys[1], '_last_modified_') + return redis.call('HGET', hash, '_last_modified_') end redis.register_function('my_hset', my_hset) @@ -271,14 +272,20 @@ redis> FUNCTION LIST 2) "my_hset" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) 2) 1) "name" 2) "my_hgetall" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) 3) 1) "name" 2) "my_hlastmodified" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) ``` You can see that it is easy to update our library with new capabilities. 
diff --git a/docs/manual/programmability/lua-api.md b/docs/interact/programmability/lua-api.md similarity index 93% rename from docs/manual/programmability/lua-api.md rename to docs/interact/programmability/lua-api.md index da048f76a8..3fa62f10c9 100644 --- a/docs/manual/programmability/lua-api.md +++ b/docs/interact/programmability/lua-api.md @@ -6,6 +6,7 @@ description: > Executing Lua in Redis aliases: - /topics/lua-api + - /docs/manual/programmability/lua-api/ --- Redis includes an embedded [Lua 5.1](https://www.lua.org/) interpreter. @@ -36,7 +37,7 @@ my_global_variable = 'some value' And similarly for the following global function declaration: ```lua -function my_global_funcion() +function my_global_function() -- Do something amazing end ``` @@ -133,16 +134,16 @@ return redis.call('ECHO', 'Echo, echo... eco... o...') ``` If and when `redis.call()` triggers a runtime exception, the raw exception is raised back to the user as an error, automatically. -Therefore, attempting to execute the following ephemeral script will fail and generate a runtime exception because `ECHO` accepts exactly zero or one argument: +Therefore, attempting to execute the following ephemeral script will fail and generate a runtime exception because `ECHO` accepts exactly one argument: ```lua redis> EVAL "return redis.call('ECHO', 'Echo,', 'echo... ', 'eco... ', 'o...')" 0 -(error) ERR Error running script (call to b0345693f4b77517a711221050e76d24ae60b7f7): @user_script:1: @user_script: 1: Wrong number of args calling Redis command from script +(error) ERR Wrong number of args calling Redis command from script script: b0345693f4b77517a711221050e76d24ae60b7f7, on @user_script:1. ``` Note that the call can fail due to various reasons, see [Execution under low memory conditions](/topics/eval-intro#execution-under-low-memory-conditions) and [Script flags](#script_flags) -To handle Redis runtime errors use `redis.pcall() instead. +To handle Redis runtime errors use `redis.pcall()` instead. 
### `redis.pcall(command [,arg...])` @@ -163,7 +164,7 @@ local reply = redis.pcall('ECHO', unpack(ARGV)) if reply['err'] ~= nil then -- Handle the error sometime, but for now just log it redis.log(redis.LOG_WARNING, reply['err']) - reply['err'] = 'Something is wrong, but no worries, everything is under control' + reply['err'] = 'ERR Something is wrong, but no worries, everything is under control' end return reply ``` @@ -172,7 +173,7 @@ Evaluating this script with more than one argument will return: ``` redis> EVAL "..." 0 hello world -(error) Something is wrong, but no worries, everything is under control +(error) ERR Something is wrong, but no worries, everything is under control ``` ### `redis.error_reply(x)` @@ -187,7 +188,7 @@ The helper accepts a single string argument and returns a Lua table with the _er The outcome of the following code is that _error1_ and _error2_ are identical for all intents and purposes: ```lua -local text = 'My very special error' +local text = 'ERR My very special error' local reply1 = { err = text } local reply2 = redis.error_reply(text) ``` @@ -195,15 +196,19 @@ local reply2 = redis.error_reply(text) Therefore, both forms are valid as means for returning an error reply from scripts: ``` -redis> EVAL "return { err = 'My very special table error' }" 0 -(error) My very special table error -redis> EVAL "return redis.error_reply('My very special reply error')" 0 -(error) My very special reply error +redis> EVAL "return { err = 'ERR My very special table error' }" 0 +(error) ERR My very special table error +redis> EVAL "return redis.error_reply('ERR My very special reply error')" 0 +(error) ERR My very special reply error ``` -For returing Redis status replies refer to [`redis.status_reply()`](#redis.status_reply). +For returning Redis status replies refer to [`redis.status_reply()`](#redis.status_reply). Refer to the [Data type conversion](#data-type-conversion) for returning other response types. 
+**Note:** +By convention, Redis uses the first word of an error string as a unique error code for specific errors or `ERR` for general-purpose errors. +Scripts are advised to follow this convention, as shown in the example above, but this is not mandatory. + ### `redis.status_reply(x)` * Since version: 2.6.0 @@ -231,7 +236,7 @@ redis> EVAL "return redis.status_reply('TOCK')" 0 TOCK ``` -For returing Redis error replies refer to [`redis.error_reply()`](#redis.error_reply). +For returning Redis error replies refer to [`redis.error_reply()`](#redis.error_reply). Refer to the [Data type conversion](#data-type-conversion) for returning other response types. ### `redis.sha1hex(x)` @@ -287,7 +292,7 @@ will produce a line similar to the following in your server's log: * Available in scripts: yes * Available in functions: yes -This function allows the executing script to switch between [Redis Serialization Protocol (RESP)](/topics/protocol) versions for the replies returned by [`redis.call()](#redis.call) and [`redis.pall()](#redis.pcall). +This function allows the executing script to switch between [Redis Serialization Protocol (RESP)](/topics/protocol) versions for the replies returned by [`redis.call()`](#redis.call) and [`redis.pcall()`](#redis.pcall). It expects a single numerical argument as the protocol's version. The default protocol version is _2_, but it can be switched to version _3_. @@ -327,7 +332,7 @@ It then picks five random elements (`SRANDMEMBER`) from the intersection and sto Finally, before returning, it deletes the temporary key that stores the intersection of the two source sets. In this case, only the new set with its five randomly-chosen elements needs to be replicated. -Replicating the `SUNIONSTORE` command and the `DEL'ition of the temporary key is unnecessary and wasteful. +Replicating the `SUNIONSTORE` command and the `DEL`ition of the temporary key is unnecessary and wasteful. 
The `redis.set_repl()` function instructs the server how to treat subsequent write commands in terms of replication. It accepts a single input argument that only be one of the following: @@ -375,7 +380,7 @@ For more information, please refer to [`Replicating commands instead of scripts` * Available in scripts: yes * Available in functions: no -This function triggers a breakpoint when using the Redis Lua debugger](/topics/ldb). +This function triggers a breakpoint when using the [Redis Lua debugger](/topics/ldb). ### `redis.debug(x)` @@ -534,7 +539,7 @@ that reply is automatically converted to Redis' protocol. Put differently; there's a one-to-one mapping between Redis' replies and Lua's data types and a one-to-one mapping between Lua's data types and the [Redis Protocol](/topics/protocol) data types. The underlying design is such that if a Redis type is converted into a Lua type and converted back into a Redis type, the result is the same as the initial value. -Type conversion from Redis protocol replies (i.e., the replies from `redis.call()` and `redis.pcall()` to Lua data types depends on the Redis Serialization Protocol version used by the script. +Type conversion from Redis protocol replies (i.e., the replies from `redis.call()` and `redis.pcall()`) to Lua data types depends on the Redis Serialization Protocol version used by the script. The default protocol version during script executions is RESP2. The script may switch the replies' protocol versions by calling the `redis.setresp()` function. @@ -624,7 +629,7 @@ Once Redis' replies are in RESP3 protocol, all of the [RESP2 to Lua conversion]( * [RESP3 null](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#null-reply) -> Lua `nil`. * [RESP3 true reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#boolean-reply) -> Lua true boolean value. 
* [RESP3 false reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#boolean-reply) -> Lua false boolean value.
-* [RESP3 double reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#double-type) -> Lua table with a single _score_ field containing a Lua number representing the double value.
+* [RESP3 double reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#double-type) -> Lua table with a single _double_ field containing a Lua number representing the double value.
* [RESP3 big number reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#big-number-type) -> Lua table with a single _big_number_ field containing a Lua string representing the big number value.
* [Redis verbatim string reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#verbatim-string-type) -> Lua table with a single _verbatim_string_ field containing a Lua table with two fields, _string_ and _format_, representing the verbatim string and its format, respectively.

@@ -639,12 +644,12 @@ Although the default protocol for incoming client connections is RESP2, the scri

* Lua Boolean -> [RESP3 Boolean reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#boolean-reply) (note that this is a change compared to the RESP2, in which returning a Boolean Lua `true` returned the number 1 to the Redis client, and returning a `false` used to return a `null`).
* Lua table with a single _map_ field set to an associative Lua table -> [RESP3 map reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#map-type).
-* Lua table with a single _set field set to an associative Lua table -> [RESP3 set reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#set-type).
Values can be set to anything and are discarded anyway. +* Lua table with a single _set_ field set to an associative Lua table -> [RESP3 set reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#set-type). Values can be set to anything and are discarded anyway. * Lua table with a single _double_ field to an associative Lua table -> [RESP3 double reply](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#double-type). * Lua nil -> [RESP3 null](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md#null-reply). However, if the connection is set use the RESP2 protocol, and even if the script replies with RESP3-typed responses, Redis will automatically perform a RESP3 to RESP2 conversion of the reply as is the case for regular commands. -That means, for example, that returning the RESP3 map type to a RESP2 connection will result in the repy being converted to a flat RESP2 array that consists of alternating field names and their values, rather than a RESP3 map. +That means, for example, that returning the RESP3 map type to a RESP2 connection will result in the reply being converted to a flat RESP2 array that consists of alternating field names and their values, rather than a RESP3 map. 
## Additional notes about scripting

@@ -665,6 +670,7 @@ The following [standard Lua libraries](https://www.lua.org/manual/5.1/manual.htm
* The [_String Manipulation (string)_ library](https://www.lua.org/manual/5.1/manual.html#5.4)
* The [_Table Manipulation (table)_ library](https://www.lua.org/manual/5.1/manual.html#5.5)
* The [_Mathematical Functions (math)_ library](https://www.lua.org/manual/5.1/manual.html#5.6)
+* The [_Operating System Facilities (os)_ library](#os-library)

In addition, the following external libraries are loaded and accessible to scripts:

@@ -673,6 +679,18 @@ In addition, the following external libraries are loaded and accessible to scrip
* The [_cmsgpack_ library](#cmsgpack-library)
* The [_bitop_ library](#bitop-library)

+### _os_ library
+
+* Since version: 8.0.0
+* Available in scripts: yes
+* Available in functions: yes
+
+_os_ provides a set of functions for dealing with date, time, and system commands.
+More details can be found in the [Operating System Facilities](https://www.lua.org/manual/5.1/manual.html#5.8).
+Note that for sandbox security, currently only the following os functions are exposed:
+
+* `os.clock()`
+
### _struct_ library

* Since version: 2.6.0
diff --git a/docs/manual/programmability/lua-debugging.md b/docs/interact/programmability/lua-debugging.md
similarity index 96%
rename from docs/manual/programmability/lua-debugging.md
rename to docs/interact/programmability/lua-debugging.md
index 3c5c9df8b7..26b4b05e1d 100644
--- a/docs/manual/programmability/lua-debugging.md
+++ b/docs/interact/programmability/lua-debugging.md
@@ -5,6 +5,7 @@ description: How to use the built-in Lua debugger
weight: 4
aliases:
 - /topics/ldb
+ - /docs/manual/programmability/lua-debugging/
---

Starting with version 3.2 Redis includes a complete Lua debugger, that can be

@@ -71,11 +72,11 @@ Redis Lua debugger help:
[h]elp Show this help.
[s]tep Run current line and stop again.
[n]ext Alias for step.
-[c]continue Run till next breakpoint.
-[l]list List source code around current line. -[l]list [line] List source code around [line]. +[c]ontinue Run till next breakpoint. +[l]ist List source code around current line. +[l]ist [line] List source code around [line]. line = 0 means: current position. -[l]list [line] [ctx] In this form [ctx] specifies how many lines +[l]ist [line] [ctx] In this form [ctx] specifies how many lines to show before/after [line]. [w]hole List all source code. Alias for 'list 1 1000000'. [p]rint Show all the local variables. @@ -86,11 +87,11 @@ Redis Lua debugger help: [b]reak - Remove breakpoint from the specified line. [b]reak 0 Remove all breakpoints. [t]race Show a backtrace. -[e]eval Execute some Lua code (in a different callframe). +[e]val Execute some Lua code (in a different callframe). [r]edis Execute a Redis command. [m]axlen [len] Trim logged Redis replies and Lua var dumps to len. Specifying zero as means unlimited. -[a]abort Stop the execution of the script. In sync +[a]bort Stop the execution of the script. In sync mode dataset changes will be retained. Debugger functions you can call from Lua scripts: diff --git a/docs/interact/pubsub.md b/docs/interact/pubsub.md new file mode 100644 index 0000000000..d9ed33e98c --- /dev/null +++ b/docs/interact/pubsub.md @@ -0,0 +1,199 @@ +--- +title: Redis Pub/Sub +linkTitle: "Pub/sub" +weight: 40 +description: How to use pub/sub channels in Redis +aliases: + - /topics/pubsub + - /docs/manual/pub-sub + - /docs/manual/pubsub +--- + +`SUBSCRIBE`, `UNSUBSCRIBE` and `PUBLISH` implement the [Publish/Subscribe messaging paradigm](http://en.wikipedia.org/wiki/Publish/subscribe) where (citing Wikipedia) senders (publishers) are not programmed to send their messages to specific receivers (subscribers). +Rather, published messages are characterized into channels, without knowledge of what (if any) subscribers there may be. 
+Subscribers express interest in one or more channels and only receive messages that are of interest, without knowledge of what (if any) publishers there are. +This decoupling of publishers and subscribers allows for greater scalability and a more dynamic network topology. + +For instance, to subscribe to channels "channel11" and "ch:00" the client issues a `SUBSCRIBE` providing the names of the channels: + +```bash +SUBSCRIBE channel11 ch:00 +``` + +Messages sent by other clients to these channels will be pushed by Redis to all the subscribed clients. +Subscribers receive the messages in the order that the messages are published. + +A client subscribed to one or more channels shouldn't issue commands, although it can `SUBSCRIBE` and `UNSUBSCRIBE` to and from other channels. +The replies to subscription and unsubscribing operations are sent in the form of messages so that the client can just read a coherent stream of messages where the first element indicates the type of message. +The commands that are allowed in the context of a subscribed RESP2 client are: + +* `PING` +* `PSUBSCRIBE` +* `PUNSUBSCRIBE` +* `QUIT` +* `RESET` +* `SSUBSCRIBE` +* `SUBSCRIBE` +* `SUNSUBSCRIBE` +* `UNSUBSCRIBE` + +However, if RESP3 is used (see `HELLO`), a client can issue any commands while in the subscribed state. + +Please note that when using `redis-cli`, in subscribed mode commands such as `UNSUBSCRIBE` and `PUNSUBSCRIBE` cannot be used because `redis-cli` will not accept any commands and can only quit the mode with `Ctrl-C`. + +## Delivery semantics + +Redis' Pub/Sub exhibits _at-most-once_ message delivery semantics. +As the name suggests, it means that a message will be delivered once if at all. +Once the message is sent by the Redis server, there's no chance of it being sent again. +If the subscriber is unable to handle the message (for example, due to an error or a network disconnect) the message is forever lost. 
+
+If your application requires stronger delivery guarantees, you may want to learn about [Redis Streams](/docs/data-types/streams-tutorial).
+Messages in streams are persisted, and support both _at-most-once_ as well as _at-least-once_ delivery semantics.
+
+## Format of pushed messages
+
+A message is an [array-reply](/topics/protocol#array-reply) with three elements.
+
+The first element is the kind of message:
+
+* `subscribe`: means that we successfully subscribed to the channel given as the second element in the reply.
+  The third argument represents the number of channels we are currently subscribed to.
+
+* `unsubscribe`: means that we successfully unsubscribed from the channel given as second element in the reply.
+  The third argument represents the number of channels we are currently subscribed to.
+  When the last argument is zero, we are no longer subscribed to any channel, and the client can issue any kind of Redis command as we are outside the Pub/Sub state.
+
+* `message`: it is a message received as a result of a `PUBLISH` command issued by another client.
+  The second element is the name of the originating channel, and the third argument is the actual message payload.
+
+## Database & Scoping
+
+Pub/Sub has no relation to the key space.
+It was made to not interfere with it on any level, including database numbers.
+
+Publishing on db 10 will be heard by a subscriber on db 1.
+
+If you need scoping of some kind, prefix the channels with the name of the environment (test, staging, production...).
+ +## Wire protocol example + +``` +SUBSCRIBE first second +*3 +$9 +subscribe +$5 +first +:1 +*3 +$9 +subscribe +$6 +second +:2 +``` + +At this point, from another client we issue a `PUBLISH` operation against the channel named `second`: + +``` +> PUBLISH second Hello +``` + +This is what the first client receives: + +``` +*3 +$7 +message +$6 +second +$5 +Hello +``` + +Now the client unsubscribes itself from all the channels using the `UNSUBSCRIBE` command without additional arguments: + +``` +UNSUBSCRIBE +*3 +$11 +unsubscribe +$6 +second +:1 +*3 +$11 +unsubscribe +$5 +first +:0 +``` + +## Pattern-matching subscriptions + +The Redis Pub/Sub implementation supports pattern matching. +Clients may subscribe to glob-style patterns to receive all the messages sent to channel names matching a given pattern. + +For instance: + +``` +PSUBSCRIBE news.* +``` + +Will receive all the messages sent to the channel `news.art.figurative`, `news.music.jazz`, etc. +All the glob-style patterns are valid, so multiple wildcards are supported. + +``` +PUNSUBSCRIBE news.* +``` + +Will then unsubscribe the client from that pattern. +No other subscriptions will be affected by this call. + +Messages received as a result of pattern matching are sent in a different format: + +* The type of the message is `pmessage`: it is a message received as a result from a `PUBLISH` command issued by another client, matching a pattern-matching subscription. + The second element is the original pattern matched, the third element is the name of the originating channel, and the last element is the actual message payload. + +Similarly to `SUBSCRIBE` and `UNSUBSCRIBE`, `PSUBSCRIBE` and `PUNSUBSCRIBE` commands are acknowledged by the system sending a message of type `psubscribe` and `punsubscribe` using the same format as the `subscribe` and `unsubscribe` message format. 
+ +## Messages matching both a pattern and a channel subscription + +A client may receive a single message multiple times if it's subscribed to multiple patterns matching a published message, or if it is subscribed to both patterns and channels matching the message. +This is shown by the following example: + +``` +SUBSCRIBE foo +PSUBSCRIBE f* +``` + +In the above example, if a message is sent to channel `foo`, the client will receive two messages: one of type `message` and one of type `pmessage`. + +## The meaning of the subscription count with pattern matching + +In `subscribe`, `unsubscribe`, `psubscribe` and `punsubscribe` message types, the last argument is the count of subscriptions still active. +This number is the total number of channels and patterns the client is still subscribed to. +So the client will exit the Pub/Sub state only when this count drops to zero as a result of unsubscribing from all the channels and patterns. + +## Sharded Pub/Sub + +From Redis 7.0, sharded Pub/Sub is introduced in which shard channels are assigned to slots by the same algorithm used to assign keys to slots. +A shard message must be sent to a node that owns the slot the shard channel is hashed to. +The cluster makes sure the published shard messages are forwarded to all nodes in the shard, so clients can subscribe to a shard channel by connecting to either the master responsible for the slot, or to any of its replicas. +`SSUBSCRIBE`, `SUNSUBSCRIBE` and `SPUBLISH` are used to implement sharded Pub/Sub. + +Sharded Pub/Sub helps to scale the usage of Pub/Sub in cluster mode. +It restricts the propagation of messages to be within the shard of a cluster. +Hence, the amount of data passing through the cluster bus is limited in comparison to global Pub/Sub where each message propagates to each node in the cluster. +This allows users to horizontally scale the Pub/Sub usage by adding more shards. 
+ +## Programming example + +Pieter Noordhuis provided a great example using EventMachine and Redis to create [a multi user high performance web chat](https://gist.github.com/pietern/348262). + +## Client library implementation hints + +Because all the messages received contain the original subscription causing the message delivery (the channel in the case of message type, and the original pattern in the case of pmessage type) client libraries may bind the original subscription to callbacks (that can be anonymous functions, blocks, function pointers), using a hash table. + +When a message is received an O(1) lookup can be done to deliver the message to the registered callback. diff --git a/docs/manual/transactions.md b/docs/interact/transactions.md similarity index 97% rename from docs/manual/transactions.md rename to docs/interact/transactions.md index ae230a944d..e9504fd47e 100644 --- a/docs/manual/transactions.md +++ b/docs/interact/transactions.md @@ -1,10 +1,11 @@ --- title: Transactions linkTitle: Transactions -weight: 1 +weight: 30 description: How transactions work in Redis aliases: - /topics/transactions + - /docs/manual/transactions/ --- Redis Transactions allow the execution of a group of commands @@ -111,11 +112,11 @@ LPOP a EXEC *2 +OK --ERR Operation against a key holding the wrong kind of value +-WRONGTYPE Operation against a key holding the wrong kind of value ``` `EXEC` returned two-element [bulk string reply](/topics/protocol#bulk-string-reply) where one is an `OK` code and -the other an `-ERR` reply. It's up to the client library to find a +the other an error reply. It's up to the client library to find a sensible way to provide the error to the user. It's important to note that @@ -221,7 +222,7 @@ instead. **NOTE** * In Redis versions before 6.0.9, an expired key would not cause a transaction to be aborted. 
[More on this](https://github.com/redis/redis/pull/7920) -* Commands within a transaction wont trigger the `WATCH` condition since they +* Commands within a transaction won't trigger the `WATCH` condition since they are only queued until the `EXEC` is sent. `WATCH` can be called multiple times. Simply all the `WATCH` calls will diff --git a/docs/libraries/index.md b/docs/libraries/index.md deleted file mode 100644 index 09b4a4cede..0000000000 --- a/docs/libraries/index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: "Libraries" -linkTitle: "Libraries" -weight: 11 -description: Libraries that use Redis -layout: bazzar -bazzar: libraries ---- - diff --git a/docs/management/_index.md b/docs/management/_index.md new file mode 100644 index 0000000000..bc67c5ceae --- /dev/null +++ b/docs/management/_index.md @@ -0,0 +1,6 @@ +--- +title: "Manage Redis" +linkTitle: "Manage Redis" +description: An administrator's guide to Redis +weight: 60 +--- diff --git a/docs/manual/admin.md b/docs/management/admin.md similarity index 85% rename from docs/manual/admin.md rename to docs/management/admin.md index fdf76ec3d4..116b3aca21 100644 --- a/docs/manual/admin.md +++ b/docs/management/admin.md @@ -7,7 +7,7 @@ aliases: [ /topics/admin, /topics/admin.md, /manual/admin, - /manual/admin.md, + /manual/admin.md ] --- @@ -17,11 +17,9 @@ aliases: [ * Deploy Redis using the Linux operating system. Redis is also tested on OS X, and from time to time on FreeBSD and OpenBSD systems. However, Linux is where most of the stress testing is performed, and where most production deployments are run. -* Set the Linux kernel overcommit memory setting to 1. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf`. Then, reboot or run the command `sysctl vm.overcommit_memory=1` to activate the setting. +* Set the Linux kernel overcommit memory setting to 1. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf`. Then, reboot or run the command `sysctl vm.overcommit_memory=1` to activate the setting. 
See [FAQ: Background saving fails with a fork() error on Linux?](https://redis.io/docs/get-started/faq/#background-saving-fails-with-a-fork-error-on-linux) for details. -* To ensure the Linux kernel feature Transparent Huge Pages does not impact Redis memory usage and latency, use this command: - -`echo never > /sys/kernel/mm/transparent_hugepage/enabled` +* To ensure the Linux kernel feature Transparent Huge Pages does not impact Redis memory usage and latency, run the command: `echo never > /sys/kernel/mm/transparent_hugepage/enabled` to disable it. See [Latency Diagnosis - Latency induced by transparent huge pages](https://redis.io/docs/management/optimization/latency/#latency-induced-by-transparent-huge-pages) for additional context. ### Memory @@ -58,7 +56,7 @@ aliases: [ ## Upgrading or restarting a Redis instance without downtime -Redis is designed to be a long-running process in your server. You can modify many configuration options restart using the [CONFIG SET command](/commands/config-set). You can also switch from AOF to RDB snapshots persistence, or the other way around, without restarting Redis. Check the output of the `CONFIG GET *` command for more information. +Redis is designed to be a long-running process in your server. You can modify many configuration options without a restart using the `CONFIG SET` command. You can also switch from AOF to RDB snapshots persistence, or the other way around, without restarting Redis. Check the output of the `CONFIG GET *` command for more information. From time to time, a restart is required, for example, to upgrade the Redis process to a newer version, or when you need to modify a configuration parameter that is currently not supported by the `CONFIG` command. @@ -76,7 +74,7 @@ Follow these steps to avoid downtime. * Configure all your clients to use the new instance (the replica). Note that you may want to use the `CLIENT PAUSE` command to ensure that no client can write to the old master during the switch. 
-* Once you confirm that the master is no longer receiving any queries (you can check this using the [MONITOR command](/commands/monitor)), elect the replica to master using the `REPLICAOF NO ONE` command, and then shut down your master. +* Once you confirm that the master is no longer receiving any queries (you can check this using the `MONITOR` command), elect the replica to master using the `REPLICAOF NO ONE` command, and then shut down your master. If you are using [Redis Sentinel](/topics/sentinel) or [Redis Cluster](/topics/cluster-tutorial), the simplest way to upgrade to newer versions is to upgrade one replica after the other. Then you can perform a manual failover to promote one of the upgraded replicas to master, and finally promote the last replica. diff --git a/docs/management/config-file.md b/docs/management/config-file.md new file mode 100644 index 0000000000..05074ec832 --- /dev/null +++ b/docs/management/config-file.md @@ -0,0 +1,12 @@ +--- +title: "Redis configuration file example" +linkTitle: "Configuration example" +weight: 3 +description: > + The self-documented `redis.conf` file that's shipped with every version. +aliases: [ + /docs/manual/config-file + ] +--- + +Note: this file is generated from the unstable redis.conf during the website's build. 
diff --git a/docs/manual/config.md b/docs/management/config.md similarity index 83% rename from docs/manual/config.md rename to docs/management/config.md index 89b8aacd75..4a834763be 100644 --- a/docs/manual/config.md +++ b/docs/management/config.md @@ -1,11 +1,13 @@ --- title: "Redis configuration" linkTitle: "Configuration" -weight: 1 +weight: 2 description: > Overview of redis.conf, the Redis configuration file -aliases: - - /docs/manual/config +aliases: [ + /docs/manual/config + ] + --- Redis is able to start without a configuration file using a built-in default @@ -37,6 +39,7 @@ The list of configuration directives, and their meaning and intended usage is available in the self documented example redis.conf shipped into the Redis distribution. +* The self documented [redis.conf for Redis 7.2](https://raw.githubusercontent.com/redis/redis/7.2/redis.conf). * The self documented [redis.conf for Redis 7.0](https://raw.githubusercontent.com/redis/redis/7.0/redis.conf). * The self documented [redis.conf for Redis 6.2](https://raw.githubusercontent.com/redis/redis/6.2/redis.conf). * The self documented [redis.conf for Redis 6.0](https://raw.githubusercontent.com/redis/redis/6.0/redis.conf). @@ -63,7 +66,7 @@ as the one used in the redis.conf file, with the exception that the keyword is prefixed with `--`. Note that internally this generates an in-memory temporary config file -(possibly concatenating the config file passed by the user if any) where +(possibly concatenating the config file passed by the user, if any) where arguments are translated into the format of redis.conf. 
Changing Redis configuration while the server is running @@ -71,20 +74,21 @@ Changing Redis configuration while the server is running It is possible to reconfigure Redis on the fly without stopping and restarting the service, or querying the current configuration programmatically using the -special commands [`CONFIG SET`](/commands/config-set) and -[`CONFIG GET`](/commands/config-get) +special commands `CONFIG SET` and `CONFIG GET`. Not all of the configuration directives are supported in this way, but most -are supported as expected. Please refer to the -[`CONFIG SET`](/commands/config-set) and [`CONFIG GET`](/commands/config-get) -pages for more information. +are supported as expected. +Please refer to the `CONFIG SET` and `CONFIG GET` pages for more information. Note that modifying the configuration on the fly **has no effects on the redis.conf file** so at the next restart of Redis the old configuration will be used instead. Make sure to also modify the `redis.conf` file accordingly to the configuration -you set using [`CONFIG SET`](/commands/config-set). You can do it manually or you can use [`CONFIG REWRITE`](/commands/config-rewrite), which will automatically scan your `redis.conf` file and update the fields which don't match the current configuration value. Fields non existing but set to the default value are not added. Comments inside your configuration file are retained. +you set using `CONFIG SET`. +You can do it manually, or you can use `CONFIG REWRITE`, which will automatically scan your `redis.conf` file and update the fields which don't match the current configuration value. +Fields non existing but set to the default value are not added. +Comments inside your configuration file are retained. Configuring Redis as a cache --- @@ -101,5 +105,5 @@ time to live for keys using the `EXPIRE` command (or equivalent) since all the keys will be evicted using an approximated LRU algorithm as long as we hit the 2 megabyte memory limit. 
-Basically in this configuration Redis acts in a similar way to memcached. +Basically, in this configuration Redis acts in a similar way to memcached. We have more extensive documentation about using Redis as an LRU cache [here](/topics/lru-cache). diff --git a/docs/reference/debugging.md b/docs/management/debugging.md similarity index 98% rename from docs/reference/debugging.md rename to docs/management/debugging.md index 95b0c719d0..60dbc70898 100644 --- a/docs/reference/debugging.md +++ b/docs/management/debugging.md @@ -1,11 +1,14 @@ --- title: "Debugging" linkTitle: "Debugging" -weight: 1 +weight: 10 description: > A guide to debugging Redis server processes -aliases: - - /topics/debugging +aliases: [ + /topics/debugging, + /docs/reference/debugging, + /docs/reference/debugging.md +] --- Redis is developed with an emphasis on stability. We do our best with diff --git a/docs/reference/optimization/_index.md b/docs/management/optimization/_index.md similarity index 71% rename from docs/reference/optimization/_index.md rename to docs/management/optimization/_index.md index 393ac7f37b..d444e489ad 100644 --- a/docs/reference/optimization/_index.md +++ b/docs/management/optimization/_index.md @@ -1,6 +1,9 @@ --- title: "Optimizing Redis" linkTitle: "Optimization" -weight: 1 +weight: 8 description: Benchmarking, profiling, and optimizations for memory and latency +aliases: [ + /docs/reference/optimization +] --- diff --git a/docs/images/Connections_chart.png b/docs/management/optimization/benchmarks/Connections_chart.png similarity index 100% rename from docs/images/Connections_chart.png rename to docs/management/optimization/benchmarks/Connections_chart.png diff --git a/docs/images/Data_size.png b/docs/management/optimization/benchmarks/Data_size.png similarity index 100% rename from docs/images/Data_size.png rename to docs/management/optimization/benchmarks/Data_size.png diff --git a/docs/images/NUMA_chart.gif 
b/docs/management/optimization/benchmarks/NUMA_chart.gif similarity index 100% rename from docs/images/NUMA_chart.gif rename to docs/management/optimization/benchmarks/NUMA_chart.gif diff --git a/docs/reference/optimization/benchmarks.md b/docs/management/optimization/benchmarks/index.md similarity index 94% rename from docs/reference/optimization/benchmarks.md rename to docs/management/optimization/benchmarks/index.md index 93663f249e..cca6d8ac52 100644 --- a/docs/reference/optimization/benchmarks.md +++ b/docs/management/optimization/benchmarks/index.md @@ -3,14 +3,17 @@ title: "Redis benchmark" linkTitle: "Benchmarking" weight: 1 description: > - Using the redis-benchmark utility to benchmark a Redis server -aliases: - - /topics/benchmarks + Using the redis-benchmark utility on a Redis server +aliases: [ + /topics/benchmarks, + /docs/reference/optimization/benchmarks, + /docs/reference/optimization/benchmarks.md +] --- Redis includes the `redis-benchmark` utility that simulates running commands done -by N clients at the same time sending M total queries. The utility provides -a default set of tests, or a custom set of tests can be supplied. +by N clients while at the same time sending M total queries. The utility provides +a default set of tests, or you can supply a custom set of tests. The following options are supported: @@ -22,7 +25,7 @@ The following options are supported: -a Password for Redis Auth -c Number of parallel connections (default 50) -n Total number of requests (default 100000) - -d Data size of SET/GET value in bytes (default 2) + -d Data size of SET/GET value in bytes (default 3) --dbnum SELECT the specified db number (default 0) -k 1=keep alive 0=reconnect (default 1) -r Use random keys for SET/GET/INCR, random values for SADD @@ -56,7 +59,7 @@ as in the following example: This example runs the tests for the `SET` and `LPUSH` commands and uses quiet mode (see the `-q` switch). 
-You can even benchmark a specfic command: +You can even benchmark a specific command: $ redis-benchmark -n 100000 -q script load "redis.call('set','foo','bar')" script load redis.call('set','foo','bar'): 69881.20 requests per second @@ -103,7 +106,7 @@ multiple commands at once, a feature often exploited by real world applications. Redis pipelining is able to dramatically improve the number of operations per second a server is able do deliver. -This is an example of running the benchmark in a MacBook Air 11" using a +Consider this example of running the benchmark using a pipelining of 16 commands: $ redis-benchmark -n 1000000 -t set,get -P 16 -q @@ -115,8 +118,7 @@ Using pipelining results in a significant increase in performance. ### Pitfalls and misconceptions The first point is obvious: the golden rule of a useful benchmark is to -only compare apples and apples. Different versions of Redis can be compared -on the same workload for instance. Or the same version of Redis, but with +only compare apples and apples. You can compare different versions of Redis on the same workload or the same version of Redis, but with different options. If you plan to compare Redis to something else, then it is important to evaluate the functional and technical differences, and take them in account. @@ -213,7 +215,7 @@ the ethernet packet size (about 1500 bytes). Actually, processing 10 bytes, 100 bytes, or 1000 bytes queries almost result in the same throughput. See the graph below. -![Data size impact](https://github.com/dspezia/redis-doc/raw/client_command/topics/Data_size.png) + ![Data size impact](Data_size.png) + On multi CPU sockets servers, Redis performance becomes dependent on the NUMA configuration and process location. The most visible effect is that @@ -227,7 +229,7 @@ Intel Nehalem EX, and Intel Westmere) with different relative placements. 
Please note this benchmark is not meant to compare CPU models between themselves (CPUs exact model and frequency are therefore not disclosed). -![NUMA chart](https://github.com/dspezia/redis-doc/raw/6374a07f93e867353e5e946c1e39a573dfc83f6c/topics/NUMA_chart.gif) + ![NUMA chart](NUMA_chart.gif) + With high-end configurations, the number of client connections is also an important factor. Being based on epoll/kqueue, the Redis event loop is quite @@ -237,7 +239,7 @@ an instance with 30000 connections can only process half the throughput achievable with 100 connections. Here is an example showing the throughput of a Redis instance per number of connections: -![connections chart](https://github.com/dspezia/redis-doc/raw/system_info/topics/Connections_chart.png) + ![connections chart](Connections_chart.png) + With high-end configurations, it is possible to achieve higher throughput by tuning the NIC(s) configuration and associated interruptions. Best throughput @@ -288,4 +290,4 @@ documentation for more information about its goals and capabilities. * [memtier_benchmark](https://github.com/redislabs/memtier_benchmark) from [Redis Ltd.](https://twitter.com/RedisInc) is a NoSQL Redis and Memcache traffic generation and benchmarking tool. * [rpc-perf](https://github.com/twitter/rpc-perf) from [Twitter](https://twitter.com/twitter) is a tool for benchmarking RPC services that supports Redis and Memcache. -* [YCSB](https://github.com/brianfrankcooper/YCSB) from [Yahoo @Yahoo](https://twitter.com/Yahoo) is a benchmarking framework with clients to many databases, including Redis. +* [YCSB](https://github.com/brianfrankcooper/YCSB) from [Yahoo @Yahoo](https://twitter.com/Yahoo) is a benchmarking framework with clients to many databases, including Redis. 
diff --git a/docs/reference/optimization/cpu-profiling.md b/docs/management/optimization/cpu-profiling.md similarity index 97% rename from docs/reference/optimization/cpu-profiling.md rename to docs/management/optimization/cpu-profiling.md index 53371e43e1..9f1383c95e 100644 --- a/docs/reference/optimization/cpu-profiling.md +++ b/docs/management/optimization/cpu-profiling.md @@ -4,8 +4,10 @@ linkTitle: "CPU profiling" weight: 1 description: > Performance engineering guide for on-CPU profiling and tracing -aliases: - - /topics/performance-on-cpu +aliases: [ + /topics/performance-on-cpu, + /docs/reference/optimization/cpu-profiling +] --- ## Filling the performance checklist @@ -18,13 +20,13 @@ are pursuing a performance regression investigation you will need a concise methodical way of monitoring and analyzing Redis performance. To do so you can rely on different methodologies (some more suited than other -depending on the class of issues/analysis we intent to make). A curated list +depending on the class of issues/analysis we intend to make). A curated list of methodologies and their steps are enumerated by Brendan Greg at the [following link](http://www.brendangregg.com/methodology.html). We recommend the Utilization Saturation and Errors (USE) Method for answering the question of what is your bottleneck. Check the following mapping between -system resource, metric, and tools for a pratical deep dive: +system resource, metric, and tools for a practical deep dive: [USE method](http://www.brendangregg.com/USEmethod/use-rosetta.html). ### Ensuring the CPU is your bottleneck @@ -69,13 +71,13 @@ and cache misses, etc. For that we will rely on toolkits (perf, bcc tools), and hardware specific PMCs (Performance Monitoring Counters), to proceed with: -- Hotspot analysis (pref or bcc tools): to profile code execution and determine which functions are consuming the most time and thus are targets for optimization. 
We'll present two options to collect, report, and visualize hotspots either with perf or bcc/BPF tracing tools. +- Hotspot analysis (perf or bcc tools): to profile code execution and determine which functions are consuming the most time and thus are targets for optimization. We'll present two options to collect, report, and visualize hotspots either with perf or bcc/BPF tracing tools. - Call counts analysis: to count events including function calls, enabling us to correlate several calls/components at once, relying on bcc/BPF tracing tools. - Hardware event sampling: crucial for understanding CPU behavior, including memory I/O, stall cycles, and cache misses. -### Tool prerequesits +### Tool prerequisites The following steps rely on Linux perf_events (aka ["perf"](https://man7.org/linux/man-pages/man1/perf.1.html)), [bcc/BPF tracing tools](https://github.com/iovisor/bcc), and Brendan Greg’s [FlameGraph repo](https://github.com/brendangregg/FlameGraph). diff --git a/docs/reference/optimization/latency-monitor.md b/docs/management/optimization/latency-monitor.md similarity index 92% rename from docs/reference/optimization/latency-monitor.md rename to docs/management/optimization/latency-monitor.md index 204b695e11..07809edb28 100644 --- a/docs/reference/optimization/latency-monitor.md +++ b/docs/management/optimization/latency-monitor.md @@ -3,8 +3,10 @@ title: "Redis latency monitoring" linkTitle: "Latency monitoring" weight: 1 description: Discovering slow server events in Redis -aliases: - - /topics/latency-monitor +aliases: [ + /topics/latency-monitor, + /docs/reference/optimization/latency-monitor +] --- Redis is often used for demanding use cases, where it @@ -55,7 +57,7 @@ event. This is how the time series work: * Every time a latency spike happens, it is logged in the appropriate time series. * Every time series is composed of 160 elements. 
* Each element is a pair made of a Unix timestamp of the time the latency spike was measured and the number of milliseconds the event took to execute. -* Latency spikes for the same event that occur in the same second are merged by taking the maximum latency. Even if continuous latency spikes are measured for a given event, which could happen with a low threshold, at least 180 seconds of history are available. +* Latency spikes for the same event that occur in the same second are merged by taking the maximum latency. Even if continuous latency spikes are measured for a given event, which could happen with a low threshold, at least 160 seconds of history are available. * Records the all-time maximum latency for every element. The framework monitors and logs latency spikes in the execution time of these events: @@ -64,11 +66,11 @@ The framework monitors and logs latency spikes in the execution time of these ev * `fast-command`: O(1) and O(log N) commands. * `fork`: the `fork(2)` system call. * `rdb-unlink-temp-file`: the `unlink(2)` system call. -* `aof-write`: writing to the AOF - a catchall event for `fsync(2)` system calls. * `aof-fsync-always`: the `fsync(2)` system call when invoked by the `appendfsync allways` policy. -* `aof-write-pending-fsync`: the `fsync(2)` system call when there are pending writes. -* `aof-write-active-child`: the `fsync(2)` system call when performed by a child process. -* `aof-write-alone`: the `fsync(2)` system call when performed by the main process. +* `aof-write`: writing to the AOF - a catchall event for `write(2)` system calls. +* `aof-write-pending-fsync`: the `write(2)` system call when there is a pending fsync. +* `aof-write-active-child`: the `write(2)` system call when there are active child processes. +* `aof-write-alone`: the `write(2)` system call when no pending fsync and no active child process. * `aof-fstat`: the `fstat(2)` system call. 
* `aof-rename`: the `rename(2)` system call for renaming the temporary file after completing `BGREWRITEAOF`. * `aof-rewrite-diff-write`: writing the differences accumulated while performing `BGREWRITEAOF`. diff --git a/docs/reference/optimization/latency.md b/docs/management/optimization/latency.md similarity index 99% rename from docs/reference/optimization/latency.md rename to docs/management/optimization/latency.md index ce214915a0..1419fa8fc6 100644 --- a/docs/reference/optimization/latency.md +++ b/docs/management/optimization/latency.md @@ -3,8 +3,10 @@ title: "Diagnosing latency issues" linkTitle: "Latency diagnosis" weight: 1 description: Finding the causes of slow responses -aliases: - - /topics/latency +aliases: [ + /topics/latency, + /docs/reference/optimization/latency +] --- This document will help you understand what the problem could be if you diff --git a/docs/reference/optimization/memory-optimization.md b/docs/management/optimization/memory-optimization.md similarity index 69% rename from docs/reference/optimization/memory-optimization.md rename to docs/management/optimization/memory-optimization.md index 0b3b822f38..09cc105a06 100644 --- a/docs/reference/optimization/memory-optimization.md +++ b/docs/management/optimization/memory-optimization.md @@ -3,28 +3,51 @@ title: Memory optimization linkTitle: Memory optimization description: Strategies for optimizing memory usage in Redis weight: 1 -aliases: - - /topics/memory-optimization +aliases: [ + /topics/memory-optimization, + /docs/reference/optimization/memory-optimization +] --- ## Special encoding of small aggregate data types Since Redis 2.2 many data types are optimized to use less space up to a certain size. -Hashes, Lists, Sets composed of just integers, and Sorted Sets, when smaller than a given number of elements, and up to a maximum element size, are encoded in a very memory efficient way that uses *up to 10 times less memory* (with 5 time less memory used being the average saving). 
+Hashes, Lists, Sets composed of just integers, and Sorted Sets, when smaller than a given number of elements, and up to a maximum element size, are encoded in a very memory-efficient way that uses *up to 10 times less memory* (with 5 times less memory used being the average saving). This is completely transparent from the point of view of the user and API. -Since this is a CPU / memory trade off it is possible to tune the maximum +Since this is a CPU / memory tradeoff it is possible to tune the maximum number of elements and maximum element size for special encoded types -using the following redis.conf directives. +using the following redis.conf directives (defaults are shown): + +### Redis <= 6.2 ``` hash-max-ziplist-entries 512 hash-max-ziplist-value 64 -zset-max-ziplist-entries 128 +zset-max-ziplist-entries 128 zset-max-ziplist-value 64 set-max-intset-entries 512 ``` +### Redis >= 7.0 + +``` +hash-max-listpack-entries 512 +hash-max-listpack-value 64 +zset-max-listpack-entries 128 +zset-max-listpack-value 64 +set-max-intset-entries 512 +``` + +### Redis >= 7.2 + +The following directives are also available: + +``` +set-max-listpack-entries 128 +set-max-listpack-value 64 +``` + If a specially encoded value overflows the configured max size, Redis will automatically convert it into normal encoding. This operation is very fast for small values, @@ -32,63 +55,61 @@ but if you change the setting in order to use specially encoded values for much larger aggregate types the suggestion is to run some benchmarks and tests to check the conversion time. -## Using 32 bit instances +## Using 32-bit instances -Redis compiled with 32 bit target uses a lot less memory per key, since pointers are small, +When Redis is compiled as a 32-bit target, it uses a lot less memory per key, since pointers are small, but such an instance will be limited to 4 GB of maximum memory usage. -To compile Redis as 32 bit binary use *make 32bit*. 
-RDB and AOF files are compatible between 32 bit and 64 bit instances -(and between little and big endian of course) so you can switch from 32 to 64 bit, or the contrary, without problems. +To compile Redis as 32-bit binary use *make 32bit*. +RDB and AOF files are compatible between 32-bit and 64-bit instances +(and between little and big endian of course) so you can switch from 32 to 64-bit, or the contrary, without problems. ## Bit and byte level operations Redis 2.2 introduced new bit and byte level operations: `GETRANGE`, `SETRANGE`, `GETBIT` and `SETBIT`. Using these commands you can treat the Redis string type as a random access array. -For instance if you have an application where users are identified by a unique progressive integer number, -you can use a bitmap in order to save information about the subscription of users in a mailing list, +For instance, if you have an application where users are identified by a unique progressive integer number, +you can use a bitmap to save information about the subscription of users in a mailing list, setting the bit for subscribed and clearing it for unsubscribed, or the other way around. With 100 million users this data will take just 12 megabytes of RAM in a Redis instance. -You can do the same using `GETRANGE` and `SETRANGE` in order to store one byte of information for each user. -This is just an example but it is actually possible to model a number of problems in very little space with these new primitives. +You can do the same using `GETRANGE` and `SETRANGE` to store one byte of information for each user. +This is just an example but it is possible to model several problems in very little space with these new primitives. ## Use hashes when possible Small hashes are encoded in a very small space, so you should try representing your data using hashes whenever possible. 
-For instance if you have objects representing users in a web application, +For instance, if you have objects representing users in a web application, instead of using different keys for name, surname, email, password, use a single hash with all the required fields. If you want to know more about this, read the next section. -## Using hashes to abstract a very memory efficient plain key-value store on top of Redis +## Using hashes to abstract a very memory-efficient plain key-value store on top of Redis -I understand the title of this section is a bit scary, but I'm going to explain in details what this is about. +I understand the title of this section is a bit scary, but I'm going to explain in detail what this is about. Basically it is possible to model a plain key-value store using Redis -where values can just be just strings, that is not just more memory efficient +where values can just be just strings, which is not just more memory efficient than Redis plain keys but also much more memory efficient than memcached. Let's start with some facts: a few keys use a lot more memory than a single key containing a hash with a few fields. How is this possible? We use a trick. -In theory in order to guarantee that we perform lookups in constant time +In theory to guarantee that we perform lookups in constant time (also known as O(1) in big O notation) there is the need to use a data structure with a constant time complexity in the average case, like a hash table. But many times hashes contain just a few fields. When hashes are small we can instead just encode them in an O(N) data structure, like a linear -array with length-prefixed key value pairs. Since we do this only when N -is small, the amortized time for HGET and HSET commands is still O(1): the +array with length-prefixed key-value pairs. 
Since we do this only when N +is small, the amortized time for `HGET` and `HSET` commands is still O(1): the hash will be converted into a real hash table as soon as the number of elements it contains grows too large (you can configure the limit in redis.conf). This does not only work well from the point of view of time complexity, but -also from the point of view of constant times, since a linear array of key -value pairs happens to play very well with the CPU cache (it has a better +also from the point of view of constant times since a linear array of key-value pairs happens to play very well with the CPU cache (it has a better cache locality than a hash table). -However since hash fields and values are not (always) represented as full -featured Redis objects, hash fields can't have an associated time to live +However since hash fields and values are not (always) represented as full-featured Redis objects, hash fields can't have an associated time to live (expire) like a real key, and can only contain a string. But we are okay with this, this was the intention anyway when the hash data type API was designed (we trust simplicity more than features, so nested data structures @@ -98,9 +119,8 @@ So hashes are memory efficient. This is useful when using hashes to represent objects or to model other problems when there are group of related fields. But what about if we have a plain key value business? -Imagine we want to use Redis as a cache for many small objects, that can be -JSON encoded objects, small HTML fragments, simple key -> boolean values -and so forth. Basically anything is a string -> string map with small keys +Imagine we want to use Redis as a cache for many small objects, which can be JSON encoded objects, small HTML fragments, simple key -> boolean values +and so forth. Basically, anything is a string -> string map with small keys and values. 
Now let's assume the objects we want to cache are numbered, like: @@ -111,7 +131,7 @@ Now let's assume the objects we want to cache are numbered, like: This is what we can do. Every time we perform a SET operation to set a new value, we actually split the key into two parts, -one part used as a key, and the other part used as the field name for the hash. For instance the +one part used as a key, and the other part used as the field name for the hash. For instance, the object named "object:1234" is actually split into: * a Key named object:12 @@ -125,14 +145,11 @@ command: HSET object:12 34 somevalue ``` -As you can see every hash will end containing 100 fields, that -is an optimal compromise between CPU and memory saved. +As you can see every hash will end up containing 100 fields, which is an optimal compromise between CPU and memory saved. There is another important thing to note, with this schema every hash will have more or -less 100 fields regardless of the number of objects we cached. This is since -our objects will always end with a number, and not a random string. In some -way the final number can be considered as a form of implicit pre-sharding. +less 100 fields regardless of the number of objects we cached. This is because our objects will always end with a number and not a random string. In some way, the final number can be considered as a form of implicit pre-sharding. What about small numbers? Like object:2? We handle this case using just "object:" as a key name, and the whole number as the hash field name. @@ -207,12 +224,11 @@ it will be converted into a real hash table, and the memory saving will be lost. You may ask, why don't you do this implicitly in the normal key space so that I don't have to care? There are two reasons: one is that we tend to make tradeoffs explicit, and this is a clear tradeoff between many things: CPU, -memory, max element size. The second is that the top level key space must +memory, and max element size. 
The second is that the top-level key space must support a lot of interesting things like expires, LRU data, and so forth so it is not practical to do this in a general way. -But the Redis Way is that the user must understand how things work so that -he is able to pick the best compromise, and to understand how the system will +But the Redis Way is that the user must understand how things work so that he can pick the best compromise and to understand how the system will behave exactly. ## Memory allocation @@ -221,39 +237,38 @@ To store user keys, Redis allocates at most as much memory as the `maxmemory` setting enables (however there are small extra allocations possible). The exact value can be set in the configuration file or set later via -`CONFIG SET` (see [Using memory as an LRU cache for more info](https://redis.io/topics/lru-cache)). +`CONFIG SET` (for more info, see [Using memory as an LRU cache](/docs/reference/eviction)). There are a few things that should be noted about how Redis manages memory: * Redis will not always free up (return) memory to the OS when keys are removed. This is not something special about Redis, but it is how most malloc() implementations work. -For example if you fill an instance with 5GB worth of data, and then +For example, if you fill an instance with 5GB worth of data, and then remove the equivalent of 2GB of data, the Resident Set Size (also known as the RSS, which is the number of memory pages consumed by the process) will probably still be around 5GB, even if Redis will claim that the user memory is around 3GB. This happens because the underlying allocator can't easily release the memory. -For example often most of the removed keys were allocated in the same pages as the other keys that still exist. +For example, often most of the removed keys were allocated on the same pages as the other keys that still exist. * The previous point means that you need to provision memory based on your **peak memory usage**. 
If your workload from time to time requires 10GB, even if -most of the times 5GB could do, you need to provision for 10GB. +most of the time 5GB could do, you need to provision for 10GB. * However allocators are smart and are able to reuse free chunks of memory, -so after you freed 2GB of your 5GB data set, when you start adding more keys +so after you free 2GB of your 5GB data set, when you start adding more keys again, you'll see the RSS (Resident Set Size) stay steady and not grow more, as you add up to 2GB of additional keys. The allocator is basically trying to reuse the 2GB of memory previously (logically) freed. * Because of all this, the fragmentation ratio is not reliable when you -had a memory usage that at peak is much larger than the currently used memory. +had a memory usage that at the peak is much larger than the currently used memory. The fragmentation is calculated as the physical memory actually used (the RSS value) divided by the amount of memory currently in use (as the sum of all the allocations performed by Redis). Because the RSS reflects the peak memory, -when the (virtually) used memory is low since a lot of keys / values were -freed, but the RSS is high, the ratio `RSS / mem_used` will be very high. +when the (virtually) used memory is low since a lot of keys/values were freed, but the RSS is high, the ratio `RSS / mem_used` will be very high. If `maxmemory` is not set Redis will keep allocating memory as it sees fit and thus it can (gradually) eat up all your free memory. -Therefore it is generally advisable to configure some limit. You may also +Therefore it is generally advisable to configure some limits. You may also want to set `maxmemory-policy` to `noeviction` (which is *not* the default value in some older versions of Redis). 
-It makes Redis return an out of memory error for write commands if and when it reaches the +It makes Redis return an out-of-memory error for write commands if and when it reaches the limit - which in turn may result in errors in the application but will not render the whole machine dead because of memory starvation. diff --git a/docs/manual/persistence.md b/docs/management/persistence.md similarity index 85% rename from docs/manual/persistence.md rename to docs/management/persistence.md index 54c3f29490..4328c1b849 100644 --- a/docs/manual/persistence.md +++ b/docs/management/persistence.md @@ -1,25 +1,26 @@ --- title: Redis persistence linkTitle: Persistence -weight: 1 -description: How Redis writes data to disk (append-only files, snapshots, etc.) +weight: 7 +description: How Redis writes data to disk aliases: [ /topics/persistence, /topics/persistence.md, - /manual/persistence, - /manual/persistence.md, + /docs/manual/persistence, + /docs/manual/persistence.md ] --- -Persistence refers to the writing of data to durable storage, such as a solid-state disk (SSD). Redis itself provides a range of persistence options: +Persistence refers to the writing of data to durable storage, such as a solid-state disk (SSD). Redis provides a range of persistence options. These include: -* **RDB** (Redis Database): The RDB persistence performs point-in-time snapshots of your dataset at specified intervals. -* **AOF** (Append Only File): The AOF persistence logs every write operation received by the server, that will be played again at server startup, reconstructing the original dataset. Commands are logged using the same format as the Redis protocol itself, in an append-only fashion. Redis is able to [rewrite](#log-rewriting) the log in the background when it gets too big. -* **No persistence**: If you wish, you can disable persistence completely, if you want your data to just exist as long as the server is running. 
-* **RDB + AOF**: It is possible to combine both AOF and RDB in the same instance. Notice that, in this case, when Redis restarts the AOF file will be used to reconstruct the original dataset since it is guaranteed to be the most complete. +* **RDB** (Redis Database): RDB persistence performs point-in-time snapshots of your dataset at specified intervals. +* **AOF** (Append Only File): AOF persistence logs every write operation received by the server. These operations can then be replayed again at server startup, reconstructing the original dataset. Commands are logged using the same format as the Redis protocol itself. +* **No persistence**: You can disable persistence completely. This is sometimes used when caching. +* **RDB + AOF**: You can also combine both AOF and RDB in the same instance. -The most important thing to understand is the different trade-offs between the -RDB and AOF persistence. +If you'd rather not think about the tradeoffs between these different persistence strategies, you may want to consider [Redis Enterprise's persistence options](https://docs.redis.com/latest/rs/databases/configure/database-persistence/), which can be pre-configured using a UI. + +To learn more about how to evaluate your Redis persistence strategy, read on. ## RDB advantages @@ -258,26 +259,30 @@ and starts appending new data into the new file. ### How I can switch to AOF, if I'm currently using dump.rdb snapshots? -There is a different procedure to do this in version 2.0 and later versions, as you -can guess it's simpler since Redis 2.2 and does not require a restart at all. +If you want to enable AOF in a server that is currently using RDB snapshots, you need to convert the data by enabling AOF via CONFIG command on the live server first. + +**IMPORTANT:** not following this procedure (e.g. just changing the config and restarting the server) can result in data loss! **Redis >= 2.2** +Preparations: + * Make a backup of your latest dump.rdb file. 
* Transfer this backup to a safe place. -* Issue the following two commands: -* `redis-cli config set appendonly yes` -* `redis-cli config set save ""` -* Make sure your database contains the same number of keys it contained. -* Make sure writes are appended to the append only file correctly. -The first CONFIG command enables the Append Only File persistence. +Switch to AOF on live database: -The second CONFIG command is used to turn off snapshotting persistence. This is optional, if you wish you can take both the persistence methods enabled. +* Enable AOF: `redis-cli config set appendonly yes` +* Optionally disable RDB: `redis-cli config set save ""` +* Make sure writes are appended to the append only file correctly. +* **IMPORTANT:** Update your `redis.conf` (potentially through `CONFIG REWRITE`) and ensure that it matches the configuration above. + If you forget this step, when you restart the server, the configuration changes will be lost and the server will start again with the old configuration, resulting in a loss of your data. + +Next time you restart the server: -**IMPORTANT:** remember to edit your redis.conf to turn on the AOF, otherwise -when you restart the server the configuration changes will be lost and the -server will start again with the old configuration. +* Before restarting the server, wait for AOF rewrite to finish persisting the data. + You can do that by watching `INFO persistence`, waiting for `aof_rewrite_in_progress` and `aof_rewrite_scheduled` to be `0`, and validating that `aof_last_bgrewrite_status` is `ok`. +* After restarting the server, check that your database contains the same number of keys it contained previously. **Redis 2.0** @@ -293,7 +298,6 @@ server will start again with the old configuration. ## Interactions between AOF and RDB persistence - Redis >= 2.4 makes sure to avoid triggering an AOF rewrite when an RDB snapshotting operation is already in progress, or allowing a `BGSAVE` while the AOF rewrite is in progress. 
This prevents two Redis background processes @@ -354,29 +358,6 @@ Prior to version 7.0.0 backing up the AOF file can be done simply by copying the but Redis will still be able to load it (see the previous sections about [truncated AOF files](#what-should-i-do-if-my-aof-gets-truncated)). -1. Turn off automatic rewrites with
- `CONFIG SET` `auto-aof-rewrite-percentage 0`
- Make sure you don't manually start a rewrite (using `BGREWRITEAOF`) during this time. -2. Check there's no current rewrite in progress using
- `INFO` `persistence`
- and verifying `aof_rewrite_in_progress` is 0. If it's 1, then you'll need to wait for the rewrite to complete. -3. Now you can safely copy the files in the `appenddirname` directory. -4. Re-enable rewrites when done:
- `CONFIG SET` `auto-aof-rewrite-percentage ` - -**Note:** If you want to minimize the time AOF rewrites are disabled you may create hard links to the files in `appenddirname` (in step 3 above) and then re-enable rewrites (step 4) after the hard links are created. -Now you can copy/tar the hardlinks and delete them when done. This works because Redis guarantees that it -only appends to files in this directory, or completely replaces them if necessary, so the content should be -consistent at any given point in time. - - -**Note:** If you want to handle the case of the server being restarted during the backup and make sure no rewrite will automatically start after the restart you can change step 1 above to also persist the updated configuration via `CONFIG REWRITE`. -Just make sure to re-enable automatic rewrites when done (step 4) and persist it with another `CONFIG REWRITE`. - -Prior to version 7.0.0 backing up the AOF file can be done simply by copying the aof file (like backing up the RDB snapshot). The file may lack the final part -but Redis will still be able to load it (see the previous sections about [truncated AOF files](#what-should-i-do-if-my-aof-gets-truncated)). 
- - ## Disaster recovery Disaster recovery in the context of Redis is basically the same story as diff --git a/docs/manual/replication.md b/docs/management/replication.md similarity index 96% rename from docs/manual/replication.md rename to docs/management/replication.md index bac6b83a4c..79e7e341fc 100644 --- a/docs/manual/replication.md +++ b/docs/management/replication.md @@ -1,13 +1,13 @@ --- title: Redis replication linkTitle: Replication -weight: 1 +weight: 5 description: How Redis supports high availability and failover with replication aliases: [ /topics/replication, /topics/replication.md, - /manual/replication, - /manual/replication.md, + /docs/manual/replication, + /docs/manual/replication.md ] --- @@ -200,14 +200,14 @@ Historically, there were some use cases that were considered legitimate for writ As of version 7.0, these use cases are now all obsolete and the same can be achieved by other means. For example: -* Computing slow Set or Sorted set operations and storing the result in temporary local keys using commands like [SUNIONSTORE](/commands/sunionstore) and [ZINTERSTORE](/commands/zinterstore). - Instead, use commands that return the result without storing it, such as [SUNION](/commands/sunion) and [ZINTER](/commands/zinter). +* Computing slow Set or Sorted set operations and storing the result in temporary local keys using commands like `SUNIONSTORE` and `ZINTERSTORE`. + Instead, use commands that return the result without storing it, such as `SUNION` and `ZINTER`. -* Using the [SORT](/commands/sort) command (which is not considered a read-only command because of the optional STORE option and therefore cannot be used on a read-only replica). - Instead, use [SORT_RO](/commands/sort_ro), which is a read-only command. +* Using the `SORT` command (which is not considered a read-only command because of the optional STORE option and therefore cannot be used on a read-only replica). + Instead, use `SORT_RO`, which is a read-only command. 
-* Using [EVAL](/commands/eval) and [EVALSHA](/commands/evalsha) are also not considered read-only commands, because the Lua script may call write commands. - Instead, use [EVAL_RO](/commands/eval_ro) and [EVALSHA_RO](/commands/evalsha_ro) where the Lua script can only call read-only commands. +* Using `EVAL` and `EVALSHA` are also not considered read-only commands, because the Lua script may call write commands. + Instead, use `EVAL_RO` and `EVALSHA_RO` where the Lua script can only call read-only commands. While writes to a replica will be discarded if the replica and the master resync or if the replica is restarted, there is no guarantee that they will sync automatically. @@ -273,7 +273,7 @@ replicate keys with expires, even when such keys are altered using Lua scripts. To implement such a feature Redis cannot rely on the ability of the master and -replica to have syncd clocks, since this is a problem that cannot be solved +replica to have synced clocks, since this is a problem that cannot be solved and would result in race conditions and diverging data sets, so Redis uses three main techniques to make the replication of expired keys able to work: diff --git a/docs/manual/scaling.md b/docs/management/scaling.md similarity index 83% rename from docs/manual/scaling.md rename to docs/management/scaling.md index c5c444477e..11de8eaafe 100644 --- a/docs/manual/scaling.md +++ b/docs/management/scaling.md @@ -1,80 +1,63 @@ --- -title: Scaling with Redis Cluster -linkTitle: Scaling -weight: 1 +title: Scale with Redis Cluster +linkTitle: Scale with Redis Cluster +weight: 6 description: Horizontal scaling with Redis Cluster -aliases: - - /topics/cluster-tutorial - - /topics/partitioning +aliases: [ + /topics/cluster-tutorial, + /topics/partitioning, + /docs/manual/scaling, + /docs/manual/scaling.md +] --- -Redis scales horizontally with a deployment topology called Redis Cluster. +Redis scales horizontally with a deployment topology called Redis Cluster. 
+This topic will teach you how to set up, test, and operate Redis Cluster in production. +You will learn about the availability and consistency characteristics of Redis Cluster from the end user's point of view. -This document is a gentle introduction to Redis Cluster, teaching you -how to set up, test, and operate Redis Cluster in production. - -This tutorial also described the availability -and consistency characteristics of Redis Cluster from the point of view -of the end user, stated in a simple-to-understand way. - -If you plan to run a production Redis Cluster deployment, or want to better understand how Redis Cluster works internally, consult the [Redis Cluster specification](/topics/cluster-spec). +If you plan to run a production Redis Cluster deployment or want to understand better how Redis Cluster works internally, consult the [Redis Cluster specification](/topics/cluster-spec). To learn how Redis Enterprise handles scaling, see [Linear Scaling with Redis Enterprise](https://redis.com/redis-enterprise/technology/linear-scaling-redis-enterprise/). ## Redis Cluster 101 -Redis Cluster provides a way to run a Redis installation where data is -**automatically sharded across multiple Redis nodes**. +Redis Cluster provides a way to run a Redis installation where data is automatically sharded across multiple Redis nodes. +Redis Cluster also provides some degree of availability during partitions—in practical terms, the ability to continue operations when some nodes fail or are unable to communicate. +However, the cluster will become unavailable in the event of larger failures (for example, when the majority of masters are unavailable). -Redis Cluster also provides **some degree of availability during partitions**, -that is in practical terms the ability to continue the operations when -some nodes fail or are not able to communicate. However the cluster stops -to operate in the event of larger failures (for example when the majority of -masters are unavailable). 
+So, with Redis Cluster, you get the ability to: -So in practical terms, what do you get with Redis Cluster? +* Automatically split your dataset among multiple nodes. +* Continue operations when a subset of the nodes are experiencing failures or are unable to communicate with the rest of the cluster. -* The ability to **automatically split your dataset among multiple nodes**. -* The ability to **continue operations when a subset of the nodes are experiencing failures** or are unable to communicate with the rest of the cluster. +#### Redis Cluster TCP ports -### Redis Cluster TCP ports +Every Redis Cluster node requires two open TCP connections: a Redis TCP port used to serve clients, e.g., 6379, and a second port known as the _cluster bus port_. +By default, the cluster bus port is set by adding 10000 to the data port (e.g., 16379); however, you can override this in the `cluster-port` configuration. -Every Redis Cluster node requires two open TCP connections: a Redis -TCP port used to serve clients, e.g., 6379, and second port known as the -cluster bus port. By default, the cluster bus port is set by adding 10000 to the data port (e.g., 16379); however, you can override this in the `cluster-port` config. +The cluster bus is a node-to-node communication channel that uses a binary protocol, which is more suited to exchanging information between nodes because it requires +little bandwidth and processing time. +Nodes use the cluster bus for failure detection, configuration updates, failover authorization, and so forth. +Clients should never try to communicate with the cluster bus port, but rather use the Redis command port. +However, make sure you open both ports in your firewall, otherwise Redis cluster nodes won't be able to communicate. -This second port is used for the cluster bus, which is a node-to-node -communication channel using a binary protocol. The cluster bus is used by -nodes for failure detection, configuration update, failover authorization, -and so forth.
Clients should never try to communicate with the cluster bus -port, but always with the normal Redis command port, however make sure you -open both ports in your firewall, otherwise Redis cluster nodes will be -unable to communicate. +For a Redis Cluster to work properly you need, for each node: -Note that for a Redis Cluster to work properly you need, for each node: - -1. The normal client communication port (usually 6379) used to communicate with clients to be open to all the clients that need to reach the cluster, plus all the other cluster nodes (that use the client port for keys migrations). +1. The client communication port (usually 6379) used to communicate with clients and be open to all the clients that need to reach the cluster, plus all the other cluster nodes that use the client port for key migrations. 2. The cluster bus port must be reachable from all the other cluster nodes. If you don't open both TCP ports, your cluster will not work as expected. -The cluster bus uses a different, binary protocol, for node to node data -exchange, which is more suited to exchange information between nodes using -little bandwidth and processing time. - -### Redis Cluster and Docker +#### Redis Cluster and Docker Currently, Redis Cluster does not support NATted environments and in general environments where IP addresses or TCP ports are remapped. -Docker uses a technique called *port mapping*: programs running inside Docker -containers may be exposed with a different port compared to the one the -program believes to be using. This is useful for running multiple -containers using the same ports, at the same time, in the same server. +Docker uses a technique called _port mapping_: programs running inside Docker containers may be exposed with a different port compared to the one the program believes to be using. +This is useful for running multiple containers using the same ports, at the same time, in the same server. 
-To make Docker compatible with Redis Cluster, you need to use -Docker's **host networking mode**. Please see the `--net=host` option -in the [Docker documentation](https://docs.docker.com/engine/userguide/networking/dockernetworks/) for more information. +To make Docker compatible with Redis Cluster, you need to use Docker's _host networking mode_. +Please see the `--net=host` option in the [Docker documentation](https://docs.docker.com/engine/userguide/networking/dockernetworks/) for more information. -### Redis Cluster data sharding +#### Redis Cluster data sharding Redis Cluster does not use consistent hashing, but a different form of sharding where every key is conceptually part of what we call a **hash slot**. @@ -107,7 +90,7 @@ Hash tags are documented in the Redis Cluster specification, but the gist is that if there is a substring between {} brackets in a key, only what is inside the string is hashed. For example, the keys `user:{123}:profile` and `user:{123}:account` are guaranteed to be in the same hash slot because they share the same hash tag. As a result, you can operate on these two keys in the same multi-key operation. -### Redis Cluster master-replica model +#### Redis Cluster master-replica model To remain available when a subset of master nodes are failing or are not able to communicate with the majority of nodes, Redis Cluster uses a @@ -128,7 +111,7 @@ master and will continue to operate correctly. However, note that if nodes B and B1 fail at the same time, Redis Cluster will not be able to continue to operate. -### Redis Cluster consistency guarantees +#### Redis Cluster consistency guarantees Redis Cluster does not guarantee **strong consistency**. In practical terms this means that under certain conditions it is possible that Redis @@ -149,7 +132,7 @@ but crashes before being able to send the write to its replicas, one of the replicas (that did not receive the write) can be promoted to master, losing the write forever. 
-This is **very similar to what happens** with most databases that are +This is very similar to what happens with most databases that are configured to flush data to disk every second, so it is a scenario you are already able to reason about because of past experiences with traditional database systems not involving distributed systems. Similarly you can @@ -183,10 +166,12 @@ However, if the partition lasts enough time for B1 to be promoted to master on the majority side of the partition, the writes that Z1 has sent to B in the meantime will be lost. -Note that there is a **maximum window** to the amount of writes Z1 will be able +{{% alert title="Note" color="info" %}} +There is a **maximum window** to the amount of writes Z1 will be able to send to B: if enough time has elapsed for the majority side of the partition to elect a replica as master, every master node in the minority side will have stopped accepting writes. +{{% /alert %}} This amount of time is a very important configuration directive of Redis Cluster, and is called the **node timeout**. @@ -199,8 +184,8 @@ and stops accepting writes. ## Redis Cluster configuration parameters -We are about to create an example cluster deployment. Before we continue, -let's introduce the configuration parameters that Redis Cluster introduces +We are about to create an example cluster deployment. +Before we continue, let's introduce the configuration parameters that Redis Cluster introduces in the `redis.conf` file. * **cluster-enabled ``**: If yes, enables Redis Cluster support in a specific Redis instance. Otherwise the instance starts as a standalone instance as usual. @@ -211,13 +196,30 @@ in the `redis.conf` file. * **cluster-require-full-coverage ``**: If this is set to yes, as it is by default, the cluster stops accepting writes if some percentage of the key space is not covered by any node. 
If the option is set to no, the cluster will still serve queries even if only requests about a subset of keys can be processed. * **cluster-allow-reads-when-down ``**: If this is set to no, as it is by default, a node in a Redis Cluster will stop serving all traffic when the cluster is marked as failed, either when a node can't reach a quorum of masters or when full coverage is not met. This prevents reading potentially inconsistent data from a node that is unaware of changes in the cluster. This option can be set to yes to allow reads from a node during the fail state, which is useful for applications that want to prioritize read availability but still want to prevent inconsistent writes. It can also be used for when using Redis Cluster with only one or two shards, as it allows the nodes to continue serving writes when a master fails but automatic failover is impossible. +## Create and use a Redis Cluster + +To create and use a Redis Cluster, follow these steps: + +* [Create a Redis Cluster](#create-a-redis-cluster) +* [Interact with the cluster](#interact-with-the-cluster) +* [Write an example app with redis-rb-cluster](#write-an-example-app-with-redis-rb-cluster) +* [Reshard the cluster](#reshard-the-cluster) +* [A more interesting example application](#a-more-interesting-example-application) +* [Test the failover](#test-the-failover) +* [Manual failover](#manual-failover) +* [Add a new node](#add-a-new-node) +* [Remove a node](#remove-a-node) +* [Replica migration](#replica-migration) +* [Upgrade nodes in a Redis Cluster](#upgrade-nodes-in-a-redis-cluster) +* [Migrate to Redis Cluster](#migrate-to-redis-cluster) + +But, first, familiarize yourself with the requirements for creating a cluster. -## Creating and using a Redis Cluster +#### Requirements to create a Redis Cluster -To create a cluster, the first thing we need is to have a few empty -Redis instances running in **cluster mode**. 
+To create a cluster, the first thing you need is to have a few empty Redis instances running in _cluster mode_. -At minimum, you'll see to set the following directives in the `redis.conf` file: +At minimum, set the following directives in the `redis.conf` file: ``` port 7000 @@ -227,7 +229,7 @@ cluster-node-timeout 5000 appendonly yes ``` -Setting the `cluster-enabled` directive to `yes` enables cluster mode. +To enable cluster mode, set the `cluster-enabled` directive to `yes`. Every instance also contains the path of a file where the configuration for this node is stored, which by default is `nodes.conf`. This file is never touched by humans; it is simply generated at startup @@ -270,10 +272,12 @@ remembers every other node using this IDs, and not by IP or port. IP addresses and ports may change, but the unique node identifier will never change for all the life of the node. We call this identifier simply **Node ID**. -### Initializing the cluster +#### Create a Redis Cluster -Now that we have a number of instances running, we need to create our -cluster by writing some meaningful configuration to the nodes. +Now that we have a number of instances running, you need to create your cluster by writing some meaningful configuration to the nodes. + +You can configure and execute individual instances manually or use the create-cluster script. +Let's go over how you do it manually. To create the cluster, run: @@ -296,8 +300,6 @@ bootstrapped into talking with each other. Finally, if everything has gone well, This means that there is at least one master instance serving each of the 16384 available slots. -### Creating a Redis Cluster using the create-cluster script - If you don't want to create a Redis Cluster by configuring and executing individual instances manually as explained above, there is a much simpler system (but you'll not learn the same amount of operational details). @@ -322,10 +324,10 @@ by default. 
When you are done, stop the cluster with: Please read the `README` inside this directory for more information on how to run the script. -### Interacting with the cluster ---- +#### Interact with the cluster -To connect to Redis Cluster, you'll need a cluster-aware Redis client. See the documentation for your client of choice to determine its cluster support. +To connect to Redis Cluster, you'll need a cluster-aware Redis client. +See the [documentation](/docs/clients) for your client of choice to determine its cluster support. You can also test your Redis Cluster using the `redis-cli` command line utility: @@ -345,8 +347,10 @@ redis 127.0.0.1:7002> get hello "world" ``` -**Note:** if you created the cluster using the script, your nodes may listen +{{% alert title="Note" color="info" %}} +If you created the cluster using the script, your nodes may listen on different ports, starting from 30001 by default. +{{% /alert %}} The `redis-cli` cluster support is very basic, so it always uses the fact that Redis Cluster nodes are able to redirect a client to the right node. @@ -356,7 +360,7 @@ right node. The map is refreshed only when something changed in the cluster configuration, for example after a failover or after the system administrator changed the cluster layout by adding or removing nodes. -### Writing an example app with redis-rb-cluster +#### Write an example app with redis-rb-cluster Before going forward showing how to operate the Redis Cluster, doing things like a failover, or a resharding, we need to create some example application @@ -370,7 +374,8 @@ is writing to the cluster. This section explains some basic usage of [redis-rb-cluster](https://github.com/antirez/redis-rb-cluster) showing two -examples. The first is the following, and is the +examples. 
+The first is the following, and is the [`example.rb`](https://github.com/antirez/redis-rb-cluster/blob/master/example.rb) file inside the redis-rb-cluster distribution: @@ -479,18 +484,18 @@ This is not a very interesting program and we'll use a better one in a moment but we can already see what happens during a resharding when the program is running. -### Resharding the cluster +#### Reshard the cluster -Now we are ready to try a cluster resharding. To do this please +Now we are ready to try a cluster resharding. To do this, please keep the example.rb program running, so that you can see if there is some -impact on the program running. Also you may want to comment the `sleep` -call in order to have some more serious write load during resharding. +impact on the program running. Also, you may want to comment the `sleep` +call to have some more serious write load during resharding. Resharding basically means to move hash slots from a set of nodes to another -set of nodes, and like cluster creation it is accomplished using the -redis-cli utility. +set of nodes. +Like cluster creation, it is accomplished using the redis-cli utility. -To start a resharding just type: +To start a resharding, just type: redis-cli --cluster reshard 127.0.0.1:7000 @@ -542,8 +547,6 @@ the following command: All the slots will be covered as usual, but this time the master at 127.0.0.1:7000 will have more hash slots, something around 6461. -### Scripting a resharding operation - Resharding can be performed automatically without the need to manually enter the parameters in an interactive way. This is possible using a command line like the following: @@ -561,7 +564,7 @@ The `--cluster-yes` option instructs the cluster manager to automatically answer Note that this option can also be activated by setting the `REDISCLI_CLUSTER_YES` environment variable. 
-### A more interesting example application +#### A more interesting example application The example application we wrote early is not very good. It writes to the cluster in a simple way without even checking if what was @@ -625,16 +628,17 @@ When I set the counter to 0 the real value was 114, so the program reports This program is much more interesting as a test case, so we'll use it to test the Redis Cluster failover. -Testing the failover ---- - -Note: during this test, you should take a tab open with the consistency test -application running. +#### Test the failover -In order to trigger the failover, the simplest thing we can do (that is also +To trigger the failover, the simplest thing we can do (that is also the semantically simplest failure that can occur in a distributed system) is to crash a single process, in our case a single master. +{{% alert title="Note" color="info" %}} +During this test, you should keep a tab open with the consistency test +application running. +{{% /alert %}} + We can identify a master and crash it with the following command: ``` @@ -707,19 +711,19 @@ The output of the `CLUSTER NODES` command may look intimidating, but it is actua * Status of the link to this node. * Slots served... -### Manual failover +#### Manual failover Sometimes it is useful to force a failover without actually causing any problem -on a master. For example in order to upgrade the Redis process of one of the -master nodes it is a good idea to failover it in order to turn it into a replica +on a master. For example, to upgrade the Redis process of one of the +master nodes it is a good idea to failover it to turn it into a replica with minimal impact on availability. Manual failovers are supported by Redis Cluster using the `CLUSTER FAILOVER` -command, that must be executed in one of the **replicas** of the master you want +command, which must be executed in one of the replicas of the master you want to failover.
Manual failovers are special and are safer compared to failovers resulting from -actual master failures, since they occur in a way that avoid data loss in the +actual master failures. They occur in a way that avoids data loss in the process, by switching clients from the original master to the new master only when the system is sure that the new master processed all the replication stream from the old one. @@ -740,13 +744,13 @@ the failover starts, and the old master is informed about the configuration switch. When the clients are unblocked on the old master, they are redirected to the new master. -Note: - -* To promote a replica to master, it must first be known as a replica by a majority of the masters in the cluster. +{{% alert title="Note" color="info" %}} +To promote a replica to master, it must first be known as a replica by a majority of the masters in the cluster. Otherwise, it cannot win the failover election. - If the replica has just been added to the cluster (see [Adding a new node as a replica](#adding-a-new-node-as-a-replica) below), you may need to wait a while before sending the `CLUSTER FAILOVER` command, to make sure the masters in cluster are aware of the new replica. + If the replica has just been added to the cluster (see [Add a new node as a replica](#add-a-new-node-as-a-replica)), you may need to wait a while before sending the `CLUSTER FAILOVER` command, to make sure the masters in cluster are aware of the new replica. +{{% /alert %}} -### Adding a new node +#### Add a new node Adding a new node is basically the process of adding an empty node and then moving some data into it, in case it is a new master, or telling it to @@ -805,20 +809,21 @@ the cluster. However it has two peculiarities compared to the other masters: * Because it is a master without assigned slots, it does not participate in the election process when a replica wants to become a master. 
Now it is possible to assign hash slots to this node using the resharding -feature of `redis-cli`. It is basically useless to show this as we already +feature of `redis-cli`. +It is basically useless to show this as we already did in a previous section, there is no difference, it is just a resharding having as a target the empty node. -### Adding a new node as a replica +##### Add a new node as a replica -Adding a new Replica can be performed in two ways. The obvious one is to +Adding a new replica can be performed in two ways. The obvious one is to use redis-cli again, but with the --cluster-slave option, like this: redis-cli --cluster add-node 127.0.0.1:7006 127.0.0.1:7000 --cluster-slave Note that the command line here is exactly like the one we used to add a new master, so we are not specifying to which master we want to add -the replica. In this case what happens is that redis-cli will add the new +the replica. In this case, what happens is that redis-cli will add the new node as replica of a random master among the masters with fewer replicas. However you can specify exactly what master you want to target with your @@ -852,8 +857,7 @@ f093c80dde814da99c5cf72a7dd01590792b783b 127.0.0.1:7006 slave 3c3a0c74aae0b56170 The node 3c3a0c... now has two replicas, running on ports 7002 (the existing one) and 7006 (the new one). -Removing a node ---- +#### Remove a node To remove a replica node just use the `del-node` command of redis-cli: @@ -871,10 +875,18 @@ over one of its replicas and remove the node after it turned into a replica of t new master. Obviously this does not help when you want to reduce the actual number of masters in your cluster, in that case, a resharding is needed. -### Replica migration +There is a special scenario where you want to remove a failed node. +You should not use the `del-node` command because it tries to connect to all nodes and you will encounter a "connection refused" error. 
+Instead, you can use the `call` command: + + redis-cli --cluster call 127.0.0.1:7000 cluster forget `` + +This command will execute the `CLUSTER FORGET` command on every node. -In Redis Cluster it is possible to reconfigure a replica to replicate with a -different master at any time just using the following command: +#### Replica migration + +In Redis Cluster, you can reconfigure a replica to replicate with a +different master at any time just using this command: CLUSTER REPLICATE @@ -883,8 +895,10 @@ master to another one automatically, without the help of the system administrato The automatic reconfiguration of replicas is called *replicas migration* and is able to improve the reliability of a Redis Cluster. -Note: you can read the details of replicas migration in the [Redis Cluster Specification](/topics/cluster-spec), here we'll only provide some information about the +{{% alert title="Note" color="info" %}} +You can read the details of replicas migration in the [Redis Cluster Specification](/topics/cluster-spec); here we'll only provide some information about the general idea and what you should do in order to benefit from it. +{{% /alert %}} The reason why you may want to let your cluster replicas to move from one master to another under certain condition, is that usually the Redis Cluster is as @@ -920,8 +934,7 @@ So what you should know about replicas migration in short? * To benefit from replica migration you have just to add a few more replicas to a single master in your cluster, it does not matter what master. * There is a configuration parameter that controls the replica migration feature that is called `cluster-migration-barrier`: you can read more about it in the example `redis.conf` file provided with Redis Cluster. -Upgrading nodes in a Redis Cluster ---- +#### Upgrade nodes in a Redis Cluster Upgrading replica nodes is easy since you just need to stop the node and restart it with an updated version of Redis.
If there are clients scaling reads using @@ -931,7 +944,7 @@ one is not available. Upgrading masters is a bit more complex, and the suggested procedure is: 1. Use `CLUSTER FAILOVER` to trigger a manual failover of the master to one of its replicas. - (See the [Manual failover](#manual-failover) section in this document.) + (See the [Manual failover](#manual-failover) in this topic.) 2. Wait for the master to turn into a replica. 3. Finally upgrade the node as you do for replicas. 4. If you want the master to be the node you just upgraded, trigger a new manual failover in order to turn back the upgraded node into a master. @@ -939,8 +952,7 @@ Upgrading masters is a bit more complex, and the suggested procedure is: Following this procedure you should upgrade one node after the other until all the nodes are upgraded. -Migrating to Redis Cluster ---- +#### Migrate to Redis Cluster Users willing to migrate to Redis Cluster may have just a single master, or may already using a preexisting sharding setup, where keys @@ -956,7 +968,7 @@ by the application, and how. There are three different cases: 3. Multiple keys operations, or transactions, or Lua scripts involving multiple keys are used with key names not having an explicit, or the same, hash tag. The third case is not handled by Redis Cluster: the application requires to -be modified in order to don't use multi keys operations or only use them in +be modified in order to not use multi keys operations or only use them in the context of the same hash tag. Case 1 and 2 are covered, so we'll focus on those two cases, that are handled @@ -986,4 +998,13 @@ may be slow since 2.8 does not implement migrate connection caching, so you may want to restart your source instance with a Redis 3.x version before to perform such operation. -**A note about the word slave used in this page**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. 
Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. +{{% alert title="Note" color="info" %}} +Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. +{{% /alert %}} + +## Learn more + +* [Redis Cluster specification](/topics/cluster-spec) +* [Linear Scaling with Redis Enterprise](https://redis.com/redis-enterprise/technology/linear-scaling-redis-enterprise/) +* [Docker documentation](https://docs.docker.com/engine/userguide/networking/dockernetworks/) + diff --git a/docs/manual/security/_index.md b/docs/management/security/_index.md similarity index 91% rename from docs/manual/security/_index.md rename to docs/management/security/_index.md index cf87c69b2f..20537b6879 100644 --- a/docs/manual/security/_index.md +++ b/docs/management/security/_index.md @@ -3,14 +3,18 @@ title: "Redis security" linkTitle: "Security" weight: 1 description: Security model and features in Redis -aliases: - - /topics/security +aliases: [ + /topics/security, + /docs/manual/security, + /docs/manual/security.md +] --- This document provides an introduction to the topic of security from the point of view of Redis. It covers the access control provided by Redis, code security concerns, attacks that can be triggered from the outside by selecting malicious inputs, and -other similar topics. +other similar topics. +You can learn more about access control, data protection and encryption, secure Redis architectures, and secure deployment techniques by taking the [Redis University security course](https://university.redis.com/courses/ru330/). 
For security-related contacts, open an issue on GitHub, or when you feel it is really important to preserve the security of the communication, use the @@ -72,11 +76,14 @@ disable protected mode or manually bind all the interfaces. ## Authentication -While Redis does not try to implement Access Control, it provides -a tiny layer of optional authentication that is turned on by editing the -**redis.conf** file. +Redis provides two ways to authenticate clients. +The recommended authentication method, introduced in Redis 6, is via Access Control Lists, allowing named users to be created and assigned fine-grained permissions. +Read more about Access Control Lists [here](/docs/management/security/acl/). -When the authorization layer is enabled, Redis will refuse any query by +The legacy authentication method is enabled by editing the **redis.conf** file, and providing a database password using the `requirepass` setting. +This password is then used by all clients. + +When the `requirepass` setting is enabled, Redis will refuse any query by unauthenticated clients. A client can authenticate itself by sending the **AUTH** command followed by the password. @@ -92,7 +99,7 @@ redundancy. If firewalling or any other system implemented to protect Redis from external attackers fail, an external client will still not be able to access the Redis instance without knowledge of the authentication password. -Since the AUTH command, like every other Redis command, is sent unencrypted, it +Since the `AUTH` command, like every other Redis command, is sent unencrypted, it does not protect against an attacker that has enough access to the network to perform eavesdropping. 
diff --git a/docs/manual/security/acl.md b/docs/management/security/acl.md similarity index 95% rename from docs/manual/security/acl.md rename to docs/management/security/acl.md index 46f3c89d0e..6b1baeed1f 100644 --- a/docs/manual/security/acl.md +++ b/docs/management/security/acl.md @@ -2,9 +2,12 @@ title: "ACL" linkTitle: "ACL" weight: 1 -description: Redis access control list -aliases: - - /topics/acl +description: Redis Access Control List +aliases: [ + /topics/acl, + /docs/manual/security/acl, + /docs/manual/security/acl.md +] --- The Redis ACL, short for Access Control List, is the feature that allows certain @@ -139,7 +142,7 @@ Configure selectors for the user: Reset the user: -* `reset` Performs the following actions: resetpass, resetkeys, resetchannels, off, -@all. The user returns to the same state it had immediately after its creation. +* `reset` Performs the following actions: resetpass, resetkeys, resetchannels, allchannels (if acl-pubsub-default is set), off, clearselectors, -@all. The user returns to the same state it had immediately after its creation. ## Create and edit user ACLs with the ACL SETUSER command @@ -166,8 +169,8 @@ users. If the user already exists, the command above will do nothing at all. Check the default user status: > ACL LIST - 1) "user alice off &* -@all" - 2) "user default on nopass ~* ~& +@all" + 1) "user alice off resetchannels -@all" + 2) "user default on nopass ~* &* +@all" The new user "alice" is: @@ -175,7 +178,7 @@ The new user "alice" is: * The user also has no passwords set. * Cannot access any command. Note that the user is created by default without the ability to access any command, so the `-@all` in the output above could be omitted; however, `ACL LIST` attempts to be explicit rather than implicit. * There are no key patterns that the user can access. -* The user can access all Pub/Sub channels. +* There are no Pub/Sub channels that the user can access. 
New users are created with restrictive permissions by default. Starting with Redis 6.2, ACL provides Pub/Sub channels access management as well. To ensure backward compatibility with version 6.0 when upgrading to Redis 6.2, new users are granted the 'allchannels' permission by default. The default can be set to `resetchannels` via the `acl-pubsub-default` configuration directive. @@ -208,7 +211,6 @@ computers to read, while `ACL GETUSER` is more human readable. > ACL GETUSER alice 1) "flags" 2) 1) "on" - 2) "allchannels" 3) "passwords" 4) 1) "2d9c75..." 5) "commands" @@ -216,27 +218,19 @@ computers to read, while `ACL GETUSER` is more human readable. 7) "keys" 8) "~cached:*" 9) "channels" - 10) "&*" + 10) "" 11) "selectors" - 12) 1) 1) "commands" - 2) "-@all +set" - 3) "keys" - 4) "~*" - 5) "channels" - 6) "&*" + 12) (empty array) The `ACL GETUSER` returns a field-value array that describes the user in more parsable terms. The output includes the set of flags, a list of key patterns, passwords, and so forth. The output is probably more readable if we use RESP3, so that it is returned as a map reply: > ACL GETUSER alice 1# "flags" => 1~ "on" - 2~ "allchannels" 2# "passwords" => 1) "2d9c75273d72b32df726fb545c8a4edc719f0a95a6fd993950b10c474ad9c927" 3# "commands" => "-@all +get" 4# "keys" => "~cached:*" - 5# "channels" => "&*" - 6# "selectors" => 1) 1# "commands" => "-@all +set" - 2# "keys" => "~*" - 3# "channels" => "&*" + 5# "channels" => "" + 6# "selectors" => (empty array) *Note: from now on, we'll continue using the Redis default protocol, version 2* @@ -245,7 +239,7 @@ Using another `ACL SETUSER` command (from a different user, because alice cannot > ACL SETUSER alice ~objects:* ~items:* ~public:* OK > ACL LIST - 1) "user alice on >2d9c75... ~cached:* ~objects:* ~items:* ~public:* &* -@all +get" + 1) "user alice on #2d9c75... 
~cached:* ~objects:* ~items:* ~public:* resetchannels -@all +get" 2) "user default on nopass ~* &* +@all" The user representation in memory is now as we expect it to be. @@ -271,7 +265,7 @@ Will result in myuser being able to call both `GET` and `SET`: > ACL LIST 1) "user default on nopass ~* &* +@all" - 2) "user myuser off &* -@all +set +get" + 2) "user myuser off resetchannels -@all +get +set" ## Command categories @@ -327,7 +321,7 @@ The following is a list of command categories and their meanings: * **transaction** - `WATCH` / `MULTI` / `EXEC` related commands. * **write** - Writing to keys (values or metadata). -Redis can also show you a list of all categories and the exact commands each category includes using the Redis `ACL` command's `CAT` subcommand. It can be used in two forms: +Redis can also show you a list of all categories and the exact commands each category includes using the Redis `ACL CAT` command. It can be used in two forms: ACL CAT -- Will just list all the categories available ACL CAT -- Will list all the commands inside the category @@ -360,15 +354,17 @@ Examples: As you can see, so far there are 21 distinct categories. Now let's check what command is part of the *geo* category: - > ACL CAT geo - 1) "geohash" - 2) "georadius_ro" - 3) "georadiusbymember" - 4) "geopos" - 5) "geoadd" - 6) "georadiusbymember_ro" - 7) "geodist" - 8) "georadius" + > ACL CAT geo + 1) "geohash" + 2) "georadius_ro" + 3) "georadiusbymember" + 4) "geopos" + 5) "geoadd" + 6) "georadiusbymember_ro" + 7) "geodist" + 8) "georadius" + 9) "geosearch" + 10) "geosearchstore" Note that commands may be part of multiple categories. For example, an ACL rule like `+@geo -@read` will result in certain geo commands to be @@ -444,11 +440,11 @@ Permissions are defined as individual characters that map to the following key p Permissions can be composed together by specifying multiple characters. 
Specifying the permission as 'RW' is considered full access and is analogous to just passing in `~`. -For a concrete example, consider a user with ACL rules `+@all ~app1:* (+@readonly ~app2:*)`. +For a concrete example, consider a user with ACL rules `+@all ~app1:* (+@read ~app2:*)`. This user has full access on `app1:*` and readonly access on `app2:*`. However, some commands support reading data from one key, doing some transformation, and storing it into another key. One such command is the `COPY` command, which copies the data from the source key into the destination key. -The example set of ACL rules is unable to handle a request copying data from `app2:user` into `app1:user`, since neither the root permission or the selector fully matches the command. +The example set of ACL rules is unable to handle a request copying data from `app2:user` into `app1:user`, since neither the root permission nor the selector fully matches the command. However, using key selectors you can define a set of ACL rules that can handle this request `+@all ~app1:* %R~app2:*`. The first pattern is able to match `app1:user` and the second pattern is able to match `app2:user`. @@ -459,7 +455,7 @@ The access flag maps to the read key permission. If the key has no logical operation flags, such as `EXISTS`, the user still needs either key read or key write permissions to execute the command. Note: Side channels to accessing user data are ignored when it comes to evaluating whether read permissions are required to execute a command. -This means that some write commands that return metadata about the modified key only require write permission on the key to execute: +This means that some write commands that return metadata about the modified key only require write permission on the key to execute. 
For example, consider the following two commands: * `LPUSH key1 data`: modifies "key1" but only returns metadata about it, the size of the list after the push, so the command only requires write permission on "key1" to execute. @@ -477,9 +473,6 @@ examples, for the sake of brevity, the long hex string was trimmed: > ACL GETUSER default 1) "flags" 2) 1) "on" - 2) "allkeys" - 3) "allcommands" - 4) "allchannels" 3) "passwords" 4) 1) "2d9c75273d72b32df726fb545c8a4edc719f0a95a6fd993950b10c474ad9c927" 5) "commands" @@ -491,9 +484,6 @@ examples, for the sake of brevity, the long hex string was trimmed: 11) "selectors" 12) (empty array) -Also, starting with Redis 6, the old command `CONFIG GET requirepass` will -no longer return the clear text password, but instead the hashed password. - Using SHA256 provides the ability to avoid storing the password in clear text while still allowing for a very fast `AUTH` command, which is a very important feature of Redis and is coherent with what clients expect from Redis. diff --git a/docs/manual/security/encryption.md b/docs/management/security/encryption.md similarity index 94% rename from docs/manual/security/encryption.md rename to docs/management/security/encryption.md index 33407aeb05..33aa813a44 100644 --- a/docs/manual/security/encryption.md +++ b/docs/management/security/encryption.md @@ -3,8 +3,11 @@ title: "TLS" linkTitle: "TLS" weight: 1 description: Redis TLS support -aliases: - - /topics/encryption +aliases: [ + /topics/encryption, + /docs/manual/security/encryption, + /docs/manual/security/encryption.md +] --- SSL/TLS is supported by Redis starting with version 6 as an optional feature @@ -15,9 +18,13 @@ that needs to be enabled at compile time. ### Building To build with TLS support you'll need OpenSSL development libraries (e.g. -libssl-dev on Debian/Ubuntu). +`libssl-dev` on Debian/Ubuntu). -Run `make BUILD_TLS=yes`. 
+Build Redis with the following command: + +```sh +make BUILD_TLS=yes +``` ### Tests diff --git a/docs/manual/sentinel.md b/docs/management/sentinel.md similarity index 99% rename from docs/manual/sentinel.md rename to docs/management/sentinel.md index 03e84d72d1..35718e3692 100644 --- a/docs/manual/sentinel.md +++ b/docs/management/sentinel.md @@ -1,10 +1,13 @@ --- title: "High availability with Redis Sentinel" linkTitle: "High availability with Sentinel" -weight: 1 +weight: 4 description: High availability for non-clustered Redis -aliases: - - /topics/sentinel +aliases: [ + /topics/sentinel, + /docs/manual/sentinel, + /docs/manual/sentinel.md +] --- Redis Sentinel provides high availability for Redis when not using [Redis Cluster](/docs/manual/scaling). @@ -114,7 +117,7 @@ One set of instances is called `mymaster`, and the other `resque`. The meaning of the arguments of `sentinel monitor` statements is the following: - sentinel monitor + sentinel monitor For the sake of clarity, let's check line by line what the configuration options mean: @@ -806,8 +809,8 @@ In order for Sentinels to connect to Redis server instances when they are configured with ACL, the Sentinel configuration must include the following directives: - sentinel auth-user - sentinel auth-pass + sentinel auth-user + sentinel auth-pass Where `` and `` are the username and password for accessing the group's instances. These credentials should be provisioned on all of the group's Redis instances with the minimal control permissions. 
For example: @@ -839,7 +842,7 @@ In order for Sentinels to connect to Redis server instances when they are configured with `requirepass`, the Sentinel configuration must include the `sentinel auth-pass` directive, in the format: - sentinel auth-pass + sentinel auth-pass Configuring Sentinel instances with authentication --- diff --git a/docs/manual/troubleshooting.md b/docs/management/troubleshooting.md similarity index 95% rename from docs/manual/troubleshooting.md rename to docs/management/troubleshooting.md index 5264c8b701..8c19c906c8 100644 --- a/docs/manual/troubleshooting.md +++ b/docs/management/troubleshooting.md @@ -1,10 +1,13 @@ --- title: "Troubleshooting Redis" linkTitle: "Troubleshooting" -weight: 1 +weight: 9 description: Problems with Redis? Start here. -aliases: - - /topics/problems +aliases: [ + /topics/problems, + /docs/manual/troubleshooting, + /docs/manual/troubleshooting.md +] --- This page tries to help you with what to do if you have issues with Redis. Part of the Redis project is helping people that are experiencing problems because we don't like to leave people alone with their issues. 
diff --git a/docs/manual/_index.md b/docs/manual/_index.md index 760810a813..b7b72b871e 100644 --- a/docs/manual/_index.md +++ b/docs/manual/_index.md @@ -1,6 +1,6 @@ --- -title: "The Redis manual" -linkTitle: "Manual" +title: "Use Redis" +linkTitle: "Use Redis" description: A developer's guide to Redis -weight: 20 +weight: 50 --- diff --git a/docs/manual/client-side-caching.md b/docs/manual/client-side-caching.md index 60e94fff89..5961fe906f 100644 --- a/docs/manual/client-side-caching.md +++ b/docs/manual/client-side-caching.md @@ -1,7 +1,7 @@ --- title: "Client-side caching in Redis" linkTitle: "Client-side caching" -weight: 1 +weight: 2 description: > Server-assisted, client-side caching in Redis aliases: @@ -93,7 +93,7 @@ The Redis client-side caching support is called _Tracking_, and has two modes: * In the _broadcasting_ mode, the server does not attempt to remember what keys a given client accessed, so this mode costs no memory at all in the server side. Instead clients subscribe to key prefixes such as `object:` or `user:`, and receive a notification message every time a key matching a subscribed prefix is touched. To recap, for now let's forget for a moment about the broadcasting mode, to -focus on the first mode. We'll describe broadcasting later more in details. +focus on the first mode. We'll describe broadcasting in more detail later. 1. Clients can enable tracking if they want. Connections start without tracking enabled. 2. When tracking is enabled, the server remembers what keys each client requested during the connection lifetime (by sending read commands about such keys). 
diff --git a/docs/manual/data-types/_index.md b/docs/manual/data-types/_index.md deleted file mode 100644 index ff39872d27..0000000000 --- a/docs/manual/data-types/_index.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: "Redis data types" -linkTitle: "Data types" -description: Overview of the many data types supported by Redis -weight: 1 -aliases: - - /topics/data-types ---- - -## Strings - -Strings are the most basic kind of Redis value. Redis Strings are binary safe, this means that a Redis string can contain any kind of data, for instance a -JPEG image or a serialized Ruby object. - -A String value can be at max 512 Megabytes in length. - -You can do a number of interesting things using strings in Redis, for instance you can: - -* Use Strings as atomic counters using commands in the INCR family: [INCR](/commands/incr), [DECR](/commands/decr), [INCRBY](/commands/incrby). -* Append to strings with the [APPEND](/commands/append) command. -* Use Strings as a random access vectors with [GETRANGE](/commands/getrange) and [SETRANGE](/commands/setrange). -* Encode a lot of data in little space, or create a Redis backed Bloom Filter using [GETBIT](/commands/getbit) and [SETBIT](/commands/setbit). - -Check all the [available string commands](/commands/#string) for more information, or read the [introduction to Redis data types](/topics/data-types-intro). - -## Lists - -Redis Lists are simply lists of strings, sorted by insertion order. -It is possible to add elements to a Redis List pushing new elements on the head (on the left) or on the tail (on the right) of the list. - -The [LPUSH](/commands/lpush) command inserts a new element on the head, while -[RPUSH](/commands/rpush) inserts a new element on the tail. A new list is created -when one of this operations is performed against an empty key. -Similarly the key is removed from the key space if a list operation will -empty the list. 
These are very handy semantics since all the list commands will -behave exactly as if they were called with an empty list if called with a -non-existing key as argument. - -Some examples of list operations and resulting lists: - - LPUSH mylist a # now the list is "a" - LPUSH mylist b # now the list is "b","a" - RPUSH mylist c # now the list is "b","a","c" (RPUSH was used this time) - -The max length of a list is 2^32 - 1 elements (4294967295, more than 4 billion elements per list). - -The main features of Redis Lists from the point of view of time complexity are -the support for constant time insertion and deletion of elements near the -head and tail, even with many millions of inserted items. -Accessing elements is very fast near the extremes of the list but -is slow if you try accessing the middle of a very big list, as it is -an O(N) operation. - -You can do many interesting things with Redis Lists, for instance you can: - -* Model a timeline in a social network, using [LPUSH](/commands/lpush) in order to add new elements in the user time line, and using [LRANGE](/commands/lrange) in order to retrieve a few of the most recently inserted items. -* You can use [LPUSH](/commands/lpush) together with [LTRIM](/commands/ltrim) to create a list that never exceeds a given number of elements, but just remembers the latest N elements. -* Lists can be used as a message passing primitive. See, for instance, the well known [Resque](https://github.com/resque/resque) Ruby library for creating background jobs. -* You can do a lot more with lists, this data type supports a number of commands, including blocking commands like [BLPOP](/commands/blpop). - -Please check all the [available commands operating on lists](/commands#list) for more information, or read the [introduction to Redis data types](/topics/data-types-intro). - -## Sets ---- - -Redis Sets are an unordered collection of Strings.
It is possible to add, -remove, and test for existence of members in O(1) (constant time regardless -of the number of elements contained inside the Set). - -Redis Sets have the desirable property of not allowing repeated members. Adding the same element multiple times will result in a set having a single copy of this element. Practically speaking this means that adding a member does not require a *check if exists then add* operation. - -A very interesting thing about Redis Sets is that they support a number of -server side commands to compute sets starting from existing sets, so you -can do unions, intersections, differences of sets in very short time. - -The max number of members in a set is 2^32 - 1 (4294967295, more than 4 billion of members per set). - -You can do many interesting things using Redis Sets, for instance you can: - -* You can track unique things using Redis Sets. Want to know all the unique IP addresses visiting a given blog post? Simply use [SADD](/commands/sadd) every time you process a page view. You are sure repeated IPs will not be inserted. -* Redis Sets are good to represent relations. You can create a tagging system with Redis using a Set to represent every tag. Then you can add all the IDs of all the objects having a given tag into a Set representing this particular tag, using the [SADD](/commands/sadd) command. Do you want all the IDs of all the Objects having three different tags at the same time? Just use [SINTER](/commands/sinter). -* You can use Sets to extract elements at random using the [SPOP](/commands/spop) or [SRANDMEMBER](/commands/srandmember) commands. - - -As usual, check the [full list of Set commands](/commands#set) for more information, or read the [introduction to Redis data types](/topics/data-types-intro). - -## Hashes - -Redis Hashes are maps between string fields and string values, so they are the perfect data type to represent objects (e.g. 
A User with a number of fields like name, surname, age, and so forth): - - HMSET user:1000 username antirez password P1pp0 age 34 - HGETALL user:1000 - HSET user:1000 password 12345 - HGETALL user:1000 - -A hash with a few fields (where few means up to one hundred or so) is stored in a way -that takes very little space, so you can store millions of objects in a small -Redis instance. - -While Hashes are used mainly to represent objects, they are capable of storing many elements, so you can use Hashes for many other tasks as well. - -Every hash can store up to 2^32 - 1 field-value pairs (more than 4 billion). - -Check the [full list of Hash commands](/commands#hash) for more information, or read the [introduction to Redis data types](/topics/data-types-intro). - -## Sorted Sets - -Redis Sorted Sets are, similarly to Redis Sets, non repeating collections of -Strings. The difference is that every member of a Sorted Set is associated -with a score, that is used to keep the Sorted Set in order, from the -smallest to the greatest score. While members are unique, scores may be -repeated. - -With Sorted Sets you can add, remove, or update elements in a very fast way -(in a time proportional to the logarithm of the number of elements). Since -elements are *stored in order* and not ordered afterwards, you can also get -ranges by score or by rank (position) in a very fast way. -Accessing the middle of a Sorted Set is also very fast, so you can use -Sorted Sets as a smart list of non repeating elements where you can quickly access -everything you need: elements in order, fast existence test, fast access -to elements in the middle! - -In short with Sorted Sets you can do a lot of tasks with great performance -that are really hard to model in other kind of databases. - -With Sorted Sets you can: - -* Build a leaderboard in a massive online game, where every time a new score -is submitted you update it using [ZADD](/commands/zadd). 
You can easily -retrieve the top users using [ZRANGE](/commands/zrange), you can also, given a -user name, return its rank in the listing using [ZRANK](/commands/zrank). -Using ZRANK and ZRANGE together you can show users with a score similar to -a given user. All very *quickly*. -* Sorted Sets are often used in order to index data that is stored inside Redis. -For instance if you have many hashes representing users, you can use a Sorted Set with members having the age of the user as the score and the ID of the user as the value. So using [ZRANGEBYSCORE](/commands/zrangebyscore) it will be trivial and fast to retrieve all the users with a given age range. - - -Sorted Sets are one of the more advanced Redis data types, so take some time to check the [full list of Sorted Set commands](/commands#sorted_set) to discover what you can do with Redis! Also you may want to read the [Introduction to Redis Data Types](/topics/data-types-intro). - -## Bitmaps and HyperLogLogs - -Redis also supports Bitmaps and HyperLogLogs which are actually data types -based on the String base type, but having their own semantics. - -Please refer to the [data types tutorial](/docs/manual/data-types/data-types-tutorial) for information about those types. - -## Streams - -A Redis stream is a data structure that acts like an append-only log. Streams are useful for recording events in the order they occur. See the [Redis streams docs](/docs/manual/data-types/streams) for details and usage. - -## Geospatial indexes - -Redis provides geospatial indexes, which are useful for finding locations within a given geographic radius. You can add locations to a geospatial index using the [GEOADD](/commands/geoadd) command. You then search for locations within a given radius using the [GEORADIUS](/commands/georadius) command. - -See the [complete geospatial index command reference](/commands/?group=geo) for all of the details. 
diff --git a/docs/manual/data-types/data-types-tutorial.md b/docs/manual/data-types/data-types-tutorial.md deleted file mode 100644 index 87bc00c915..0000000000 --- a/docs/manual/data-types/data-types-tutorial.md +++ /dev/null @@ -1,1028 +0,0 @@ ---- -title: "Data types tutorial" -linkTitle: "Tutorial" -description: Learning the basic Redis data types and how to use them -weight: 1 -aliases: - - /topics/data-types-intro ---- - -Redis is not a *plain* key-value store, it is actually a *data structures server*, supporting different kinds of values. What this means is that, while in -traditional key-value stores you associate string keys to string values, in -Redis the value is not limited to a simple string, but can also hold more complex -data structures. The following is the list of all the data structures supported -by Redis, which will be covered separately in this tutorial: - -* Binary-safe strings. -* Lists: collections of string elements sorted according to the order of insertion. They are basically *linked lists*. -* Sets: collections of unique, unsorted string elements. -* Sorted sets, similar to Sets but where every string element is associated to a - floating number value, called *score*. The elements are always taken sorted - by their score, so unlike Sets it is possible to retrieve a range of elements - (for example you may ask: give me the top 10, or the bottom 10). -* Hashes, which are maps composed of fields associated with values. Both the - field and the value are strings. This is very similar to Ruby or Python - hashes. -* Bit arrays (or simply bitmaps): it is possible, using special commands, to - handle String values like an array of bits: you can set and clear individual - bits, count all the bits set to 1, find the first set or unset bit, and so - forth. -* HyperLogLogs: this is a probabilistic data structure which is used in order - to estimate the cardinality of a set. Don't be scared, it is simpler than - it seems... 
See later in the HyperLogLog section of this tutorial. -* Streams: append-only collections of map-like entries that provide an abstract - log data type. They are covered in depth in the - [Introduction to Redis Streams](/topics/streams-intro). - -It's not always trivial to grasp how these data types work and what to use in -order to solve a given problem from the [command reference](/commands), so this -document is a crash course in Redis data types and their most common patterns. - -For all the examples we'll use the `redis-cli` utility, a simple but -handy command-line utility, to issue commands against the Redis server. - -## Keys - -Redis keys are binary safe, this means that you can use any binary sequence as a -key, from a string like "foo" to the content of a JPEG file. -The empty string is also a valid key. - -A few other rules about keys: - -* Very long keys are not a good idea. For instance a key of 1024 bytes is a bad - idea not only memory-wise, but also because the lookup of the key in the - dataset may require several costly key-comparisons. Even when the task at hand - is to match the existence of a large value, hashing it (for example - with SHA1) is a better idea, especially from the perspective of memory - and bandwidth. -* Very short keys are often not a good idea. There is little point in writing - "u1000flw" as a key if you can instead write "user:1000:followers". The latter - is more readable and the added space is minor compared to the space used by - the key object itself and the value object. While short keys will obviously - consume a bit less memory, your job is to find the right balance. -* Try to stick with a schema. For instance "object-type:id" is a good - idea, as in "user:1000". Dots or dashes are often used for multi-word - fields, as in "comment:4321:reply.to" or "comment:4321:reply-to". -* The maximum allowed key size is 512 MB. 
- - -## Strings - -The Redis String type is the simplest type of value you can associate with -a Redis key. It is the only data type in Memcached, so it is also very natural -for newcomers to use it in Redis. - -Since Redis keys are strings, when we use the string type as a value too, -we are mapping a string to another string. The string data type is useful -for a number of use cases, like caching HTML fragments or pages. - -Let's play a bit with the string type, using `redis-cli` (all the examples -will be performed via `redis-cli` in this tutorial). - - > set mykey somevalue - OK - > get mykey - "somevalue" - -As you can see using the `SET` and the `GET` commands are the way we set -and retrieve a string value. Note that `SET` will replace any existing value -already stored into the key, in the case that the key already exists, even if -the key is associated with a non-string value. So `SET` performs an assignment. - -Values can be strings (including binary data) of every kind, for instance you -can store a jpeg image inside a value. A value can't be bigger than 512 MB. - -The `SET` command has interesting options, that are provided as additional -arguments. For example, I may ask `SET` to fail if the key already exists, -or the opposite, that it only succeed if the key already exists: - - > set mykey newval nx - (nil) - > set mykey newval xx - OK - -Even if strings are the basic values of Redis, there are interesting operations -you can perform with them. For instance, one is atomic increment: - - > set counter 100 - OK - > incr counter - (integer) 101 - > incr counter - (integer) 102 - > incrby counter 50 - (integer) 152 - -The [INCR](/commands/incr) command parses the string value as an integer, -increments it by one, and finally sets the obtained value as the new value. -There are other similar commands like [INCRBY](/commands/incrby), -[DECR](/commands/decr) and [DECRBY](/commands/decrby). 
Internally it's -always the same command, acting in a slightly different way. - -What does it mean that INCR is atomic? -That even multiple clients issuing INCR against -the same key will never enter into a race condition. For instance, it will never -happen that client 1 reads "10", client 2 reads "10" at the same time, both -increment to 11, and set the new value to 11. The final value will always be -12 and the read-increment-set operation is performed while all the other -clients are not executing a command at the same time. - -There are a number of commands for operating on strings. For example -the `GETSET` command sets a key to a new value, returning the old value as the -result. You can use this command, for example, if you have a -system that increments a Redis key using `INCR` -every time your web site receives a new visitor. You may want to collect this -information once every hour, without losing a single increment. -You can `GETSET` the key, assigning it the new value of "0" and reading the -old value back. - -The ability to set or retrieve the value of multiple keys in a single -command is also useful for reduced latency. For this reason there are -the `MSET` and `MGET` commands: - - > mset a 10 b 20 c 30 - OK - > mget a b c - 1) "10" - 2) "20" - 3) "30" - -When `MGET` is used, Redis returns an array of values. - -## Altering and querying the key space - -There are commands that are not defined on particular types, but are useful -in order to interact with the space of keys, and thus, can be used with -keys of any type. - -For example the `EXISTS` command returns 1 or 0 to signal if a given key -exists or not in the database, while the `DEL` command deletes a key -and associated value, whatever the value is. 
- - > set mykey hello - OK - > exists mykey - (integer) 1 - > del mykey - (integer) 1 - > exists mykey - (integer) 0 - -From the examples you can also see how `DEL` itself returns 1 or 0 depending on whether -the key was removed (it existed) or not (there was no such key with that -name). - -There are many key space related commands, but the above two are the -essential ones together with the `TYPE` command, which returns the kind -of value stored at the specified key: - - > set mykey x - OK - > type mykey - string - > del mykey - (integer) 1 - > type mykey - none - -## Key expiration - -Before moving on, we should look at an important Redis feature that works regardless of the type of value you're storing: key expiration. Key expiration lets you set a timeout for a key, also known as a "time to live", or "TTL". When the time to live elapses, the key is automatically destroyed. - -A few important notes about key expiration: - -* They can be set both using seconds or milliseconds precision. -* However the expire time resolution is always 1 millisecond. -* Information about expires are replicated and persisted on disk, the time virtually passes when your Redis server remains stopped (this means that Redis saves the date at which a key will expire). - -Use the `EXPIRE` command to set a key's expiration: - - > set key some-value - OK - > expire key 5 - (integer) 1 - > get key (immediately) - "some-value" - > get key (after some time) - (nil) - -The key vanished between the two `GET` calls, since the second call was -delayed more than 5 seconds. In the example above we used `EXPIRE` in -order to set the expire (it can also be used in order to set a different -expire to a key already having one, like `PERSIST` can be used in order -to remove the expire and make the key persistent forever). However we -can also create keys with expires using other Redis commands. 
For example -using `SET` options: - - > set key 100 ex 10 - OK - > ttl key - (integer) 9 - -The example above sets a key with the string value `100`, having an expire -of ten seconds. Later the `TTL` command is called in order to check the -remaining time to live for the key. - -In order to set and check expires in milliseconds, check the `PEXPIRE` and -the `PTTL` commands, and the full list of `SET` options. - - -## Lists - -To explain the List data type it's better to start with a little bit of theory, -as the term *List* is often used in an improper way by information technology -folks. For instance "Python Lists" are not what the name may suggest (Linked -Lists), but rather Arrays (the same data type is called Array in -Ruby actually). - -From a very general point of view a List is just a sequence of ordered -elements: 10,20,1,2,3 is a list. But the properties of a List implemented using -an Array are very different from the properties of a List implemented using a -*Linked List*. - -Redis lists are implemented via Linked Lists. This means that even if you have -millions of elements inside a list, the operation of adding a new element in -the head or in the tail of the list is performed *in constant time*. The speed of adding a -new element with the `LPUSH` command to the head of a list with ten -elements is the same as adding an element to the head of list with 10 -million elements. - -What's the downside? Accessing an element *by index* is very fast in lists -implemented with an Array (constant time indexed access) and not so fast in -lists implemented by linked lists (where the operation requires an amount of -work proportional to the index of the accessed element). - -Redis Lists are implemented with linked lists because for a database system it -is crucial to be able to add elements to a very long list in a very fast way. -Another strong advantage, as you'll see in a moment, is that Redis Lists can be -taken at constant length in constant time. 
- -When fast access to the middle of a large collection of elements is important, -there is a different data structure that can be used, called sorted sets. -Sorted sets will be covered later in this tutorial. - -### First steps with Redis Lists - -The `LPUSH` command adds a new element into a list, on the -left (at the head), while the `RPUSH` command adds a new -element into a list, on the right (at the tail). Finally the -`LRANGE` command extracts ranges of elements from lists: - - > rpush mylist A - (integer) 1 - > rpush mylist B - (integer) 2 - > lpush mylist first - (integer) 3 - > lrange mylist 0 -1 - 1) "first" - 2) "A" - 3) "B" - -Note that [LRANGE](/commands/lrange) takes two indexes, the first and the last -element of the range to return. Both the indexes can be negative, telling Redis -to start counting from the end: so -1 is the last element, -2 is the -penultimate element of the list, and so forth. - -As you can see `RPUSH` appended the elements on the right of the list, while -the final `LPUSH` appended the element on the left. - -Both commands are *variadic commands*, meaning that you are free to push -multiple elements into a list in a single call: - - > rpush mylist 1 2 3 4 5 "foo bar" - (integer) 9 - > lrange mylist 0 -1 - 1) "first" - 2) "A" - 3) "B" - 4) "1" - 5) "2" - 6) "3" - 7) "4" - 8) "5" - 9) "foo bar" - -An important operation defined on Redis lists is the ability to *pop elements*. -Popping elements is the operation of both retrieving the element from the list, -and eliminating it from the list, at the same time. You can pop elements -from left and right, similarly to how you can push elements in both sides -of the list: - - > rpush mylist a b c - (integer) 3 - > rpop mylist - "c" - > rpop mylist - "b" - > rpop mylist - "a" - -We added three elements and popped three elements, so at the end of this -sequence of commands the list is empty and there are no more elements to -pop. 
If we try to pop yet another element, this is the result we get: - - > rpop mylist - (nil) - -Redis returned a NULL value to signal that there are no elements in the -list. - -### Common use cases for lists - -Lists are useful for a number of tasks, two very representative use cases -are the following: - -* Remember the latest updates posted by users into a social network. -* Communication between processes, using a consumer-producer pattern where the producer pushes items into a list, and a consumer (usually a *worker*) consumes those items and executed actions. Redis has special list commands to make this use case both more reliable and efficient. - -For example both the popular Ruby libraries [resque](https://github.com/resque/resque) and -[sidekiq](https://github.com/mperham/sidekiq) use Redis lists under the hood in order to -implement background jobs. - -The popular Twitter social network [takes the latest tweets](http://www.infoq.com/presentations/Real-Time-Delivery-Twitter) -posted by users into Redis lists. - -To describe a common use case step by step, imagine your home page shows the latest -photos published in a photo sharing social network and you want to speedup access. - -* Every time a user posts a new photo, we add its ID into a list with `LPUSH`. -* When users visit the home page, we use `LRANGE 0 9` in order to get the latest 10 posted items. - -### Capped lists - -In many use cases we just want to use lists to store the *latest items*, -whatever they are: social network updates, logs, or anything else. - -Redis allows us to use lists as a capped collection, only remembering the latest -N items and discarding all the oldest items using the `LTRIM` command. - -The `LTRIM` command is similar to `LRANGE`, but **instead of displaying the -specified range of elements** it sets this range as the new list value. All -the elements outside the given range are removed. 
- -An example will make it more clear: - - > rpush mylist 1 2 3 4 5 - (integer) 5 - > ltrim mylist 0 2 - OK - > lrange mylist 0 -1 - 1) "1" - 2) "2" - 3) "3" - -The above `LTRIM` command tells Redis to take just list elements from index -0 to 2, everything else will be discarded. This allows for a very simple but -useful pattern: doing a List push operation + a List trim operation together -in order to add a new element and discard elements exceeding a limit: - - LPUSH mylist - LTRIM mylist 0 999 - -The above combination adds a new element and takes only the 1000 -newest elements into the list. With `LRANGE` you can access the top items -without any need to remember very old data. - -Note: while `LRANGE` is technically an O(N) command, accessing small ranges -towards the head or the tail of the list is a constant time operation. - -Blocking operations on lists ---- - -Lists have a special feature that make them suitable to implement queues, -and in general as a building block for inter process communication systems: -blocking operations. - -Imagine you want to push items into a list with one process, and use -a different process in order to actually do some kind of work with those -items. This is the usual producer / consumer setup, and can be implemented -in the following simple way: - -* To push items into the list, producers call `LPUSH`. -* To extract / process items from the list, consumers call `RPOP`. - -However it is possible that sometimes the list is empty and there is nothing -to process, so `RPOP` just returns NULL. In this case a consumer is forced to wait -some time and retry again with `RPOP`. This is called *polling*, and is not -a good idea in this context because it has several drawbacks: - -1. Forces Redis and clients to process useless commands (all the requests when the list is empty will get no actual work done, they'll just return NULL). -2. Adds a delay to the processing of items, since after a worker receives a NULL, it waits some time. 
To make the delay smaller, we could wait less between calls to `RPOP`, with the effect of amplifying problem number 1, i.e. more useless calls to Redis. - -So Redis implements commands called `BRPOP` and `BLPOP` which are versions -of `RPOP` and `LPOP` able to block if the list is empty: they'll return to -the caller only when a new element is added to the list, or when a user-specified -timeout is reached. - -This is an example of a `BRPOP` call we could use in the worker: - - > brpop tasks 5 - 1) "tasks" - 2) "do_something" - -It means: "wait for elements in the list `tasks`, but return if after 5 seconds -no element is available". - -Note that you can use 0 as timeout to wait for elements forever, and you can -also specify multiple lists and not just one, in order to wait on multiple -lists at the same time, and get notified when the first list receives an -element. - -A few things to note about `BRPOP`: - -1. Clients are served in an ordered way: the first client that blocked waiting for a list, is served first when an element is pushed by some other client, and so forth. -2. The return value is different compared to `RPOP`: it is a two-element array since it also includes the name of the key, because `BRPOP` and `BLPOP` are able to block waiting for elements from multiple lists. -3. If the timeout is reached, NULL is returned. - -There are more things you should know about lists and blocking ops. We -suggest that you read more on the following: - -* It is possible to build safer queues or rotating queues using `LMOVE`. -* There is also a blocking variant of the command, called `BLMOVE`. - -## Automatic creation and removal of keys - -So far in our examples we never had to create empty lists before pushing -elements, or removing empty lists when they no longer have elements inside. 
-It is Redis' responsibility to delete keys when lists are left empty, or to create -an empty list if the key does not exist and we are trying to add elements -to it, for example, with `LPUSH`. - -This is not specific to lists, it applies to all the Redis data types -composed of multiple elements -- Streams, Sets, Sorted Sets and Hashes. - -Basically we can summarize the behavior with three rules: - -1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is created before adding the element. -2. When we remove elements from an aggregate data type, if the value remains empty, the key is automatically destroyed. The Stream data type is the only exception to this rule. -3. Calling a read-only command such as `LLEN` (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. - -Examples of rule 1: - - > del mylist - (integer) 1 - > lpush mylist 1 2 3 - (integer) 3 - -However we can't perform operations against the wrong type if the key exists: - - > set foo bar - OK - > lpush foo 1 2 3 - (error) WRONGTYPE Operation against a key holding the wrong kind of value - > type foo - string - -Example of rule 2: - - > lpush mylist 1 2 3 - (integer) 3 - > exists mylist - (integer) 1 - > lpop mylist - "3" - > lpop mylist - "2" - > lpop mylist - "1" - > exists mylist - (integer) 0 - -The key no longer exists after all the elements are popped. 
- -Example of rule 3: - - > del mylist - (integer) 0 - > llen mylist - (integer) 0 - > lpop mylist - (nil) - - -## Hashes - -Redis hashes look exactly how one might expect a "hash" to look, with field-value pairs: - - > hmset user:1000 username antirez birthyear 1977 verified 1 - OK - > hget user:1000 username - "antirez" - > hget user:1000 birthyear - "1977" - > hgetall user:1000 - 1) "username" - 2) "antirez" - 3) "birthyear" - 4) "1977" - 5) "verified" - 6) "1" - -While hashes are handy to represent *objects*, actually the number of fields you can -put inside a hash has no practical limits (other than available memory), so you can use -hashes in many different ways inside your application. - -The command `HMSET` sets multiple fields of the hash, while `HGET` retrieves -a single field. `HMGET` is similar to `HGET` but returns an array of values: - - > hmget user:1000 username birthyear no-such-field - 1) "antirez" - 2) "1977" - 3) (nil) - -There are commands that are able to perform operations on individual fields -as well, like `HINCRBY`: - - > hincrby user:1000 birthyear 10 - (integer) 1987 - > hincrby user:1000 birthyear 10 - (integer) 1997 - -You can find the [full list of hash commands in the documentation](https://redis.io/commands#hash). - -It is worth noting that small hashes (i.e., a few elements with small values) are -encoded in special way in memory that make them very memory efficient. - - -## Sets - -Redis Sets are unordered collections of strings. The -`SADD` command adds new elements to a set. It's also possible -to do a number of other operations against sets like testing if a given element -already exists, performing the intersection, union or difference between -multiple sets, and so forth. - - > sadd myset 1 2 3 - (integer) 3 - > smembers myset - 1. 3 - 2. 1 - 3. 2 - -Here I've added three elements to my set and told Redis to return all the -elements. 
As you can see they are not sorted -- Redis is free to return the -elements in any order at every call, since there is no contract with the -user about element ordering. - -Redis has commands to test for membership. For example, checking if an element exists: - - > sismember myset 3 - (integer) 1 - > sismember myset 30 - (integer) 0 - -"3" is a member of the set, while "30" is not. - -Sets are good for expressing relations between objects. -For instance we can easily use sets in order to implement tags. - -A simple way to model this problem is to have a set for every object we -want to tag. The set contains the IDs of the tags associated with the object. - -One illustration is tagging news articles. -If article ID 1000 is tagged with tags 1, 2, 5 and 77, a set -can associate these tag IDs with the news item: - - > sadd news:1000:tags 1 2 5 77 - (integer) 4 - -We may also want to have the inverse relation as well: the list -of all the news tagged with a given tag: - - > sadd tag:1:news 1000 - (integer) 1 - > sadd tag:2:news 1000 - (integer) 1 - > sadd tag:5:news 1000 - (integer) 1 - > sadd tag:77:news 1000 - (integer) 1 - -To get all the tags for a given object is trivial: - - > smembers news:1000:tags - 1. 5 - 2. 1 - 3. 77 - 4. 2 - -Note: in the example we assume you have another data structure, for example -a Redis hash, which maps tag IDs to tag names. - -There are other non trivial operations that are still easy to implement -using the right Redis commands. For instance we may want a list of all the -objects with the tags 1, 2, 10, and 27 together. We can do this using -the `SINTER` command, which performs the intersection between different -sets. We can use: - - > sinter tag:1:news tag:2:news tag:10:news tag:27:news - ... results here ... - -In addition to intersection you can also perform -unions, difference, extract a random element, and so forth. - -The command to extract an element is called `SPOP`, and is handy to model -certain problems. 
For example in order to implement a web-based poker game, -you may want to represent your deck with a set. Imagine we use a one-char -prefix for (C)lubs, (D)iamonds, (H)earts, (S)pades: - - > sadd deck C1 C2 C3 C4 C5 C6 C7 C8 C9 C10 CJ CQ CK - D1 D2 D3 D4 D5 D6 D7 D8 D9 D10 DJ DQ DK H1 H2 H3 - H4 H5 H6 H7 H8 H9 H10 HJ HQ HK S1 S2 S3 S4 S5 S6 - S7 S8 S9 S10 SJ SQ SK - (integer) 52 - -Now we want to provide each player with 5 cards. The `SPOP` command -removes a random element, returning it to the client, so it is the -perfect operation in this case. - -However if we call it against our deck directly, in the next play of the -game we'll need to populate the deck of cards again, which may not be -ideal. So to start, we can make a copy of the set stored in the `deck` key -into the `game:1:deck` key. - -This is accomplished using `SUNIONSTORE`, which normally performs the -union between multiple sets, and stores the result into another set. -However, since the union of a single set is itself, I can copy my deck -with: - - > sunionstore game:1:deck deck - (integer) 52 - -Now I'm ready to provide the first player with five cards: - - > spop game:1:deck - "C6" - > spop game:1:deck - "CQ" - > spop game:1:deck - "D1" - > spop game:1:deck - "CJ" - > spop game:1:deck - "SJ" - -One pair of jacks, not great... - -This is a good time to introduce the set command that provides the number -of elements inside a set. This is often called the *cardinality of a set* -in the context of set theory, so the Redis command is called `SCARD`. - - > scard game:1:deck - (integer) 47 - -The math works: 52 - 5 = 47. - -When you need to just get random elements without removing them from the -set, there is the `SRANDMEMBER` command suitable for the task. It also features -the ability to return both repeating and non-repeating elements. - - -## Sorted sets - -Sorted sets are a data type which is similar to a mix between a Set and -a Hash. 
Like sets, sorted sets are composed of unique, non-repeating -string elements, so in some sense a sorted set is a set as well. - -However while elements inside sets are not ordered, every element in -a sorted set is associated with a floating point value, called *the score* -(this is why the type is also similar to a hash, since every element -is mapped to a value). - -Moreover, elements in a sorted sets are *taken in order* (so they are not -ordered on request, order is a peculiarity of the data structure used to -represent sorted sets). They are ordered according to the following rule: - -* If B and A are two elements with a different score, then A > B if A.score is > B.score. -* If B and A have exactly the same score, then A > B if the A string is lexicographically greater than the B string. B and A strings can't be equal since sorted sets only have unique elements. - -Let's start with a simple example, adding a few selected hackers names as -sorted set elements, with their year of birth as "score". - - > zadd hackers 1940 "Alan Kay" - (integer) 1 - > zadd hackers 1957 "Sophie Wilson" - (integer) 1 - > zadd hackers 1953 "Richard Stallman" - (integer) 1 - > zadd hackers 1949 "Anita Borg" - (integer) 1 - > zadd hackers 1965 "Yukihiro Matsumoto" - (integer) 1 - > zadd hackers 1914 "Hedy Lamarr" - (integer) 1 - > zadd hackers 1916 "Claude Shannon" - (integer) 1 - > zadd hackers 1969 "Linus Torvalds" - (integer) 1 - > zadd hackers 1912 "Alan Turing" - (integer) 1 - - -As you can see `ZADD` is similar to `SADD`, but takes one additional argument -(placed before the element to be added) which is the score. -`ZADD` is also variadic, so you are free to specify multiple score-value -pairs, even if this is not used in the example above. - -With sorted sets it is trivial to return a list of hackers sorted by their -birth year because actually *they are already sorted*. 
- -Implementation note: Sorted sets are implemented via a -dual-ported data structure containing both a skip list and a hash table, so -every time we add an element Redis performs an O(log(N)) operation. That's -good, but when we ask for sorted elements Redis does not have to do any work at -all, it's already all sorted: - - > zrange hackers 0 -1 - 1) "Alan Turing" - 2) "Hedy Lamarr" - 3) "Claude Shannon" - 4) "Alan Kay" - 5) "Anita Borg" - 6) "Richard Stallman" - 7) "Sophie Wilson" - 8) "Yukihiro Matsumoto" - 9) "Linus Torvalds" - -Note: 0 and -1 means from element index 0 to the last element (-1 works -here just as it does in the case of the `LRANGE` command). - -What if I want to order them the opposite way, youngest to oldest? -Use [ZREVRANGE](/commands/zrevrange) instead of [ZRANGE](/commands/zrange): - - > zrevrange hackers 0 -1 - 1) "Linus Torvalds" - 2) "Yukihiro Matsumoto" - 3) "Sophie Wilson" - 4) "Richard Stallman" - 5) "Anita Borg" - 6) "Alan Kay" - 7) "Claude Shannon" - 8) "Hedy Lamarr" - 9) "Alan Turing" - -It is possible to return scores as well, using the `WITHSCORES` argument: - - > zrange hackers 0 -1 withscores - 1) "Alan Turing" - 2) "1912" - 3) "Hedy Lamarr" - 4) "1914" - 5) "Claude Shannon" - 6) "1916" - 7) "Alan Kay" - 8) "1940" - 9) "Anita Borg" - 10) "1949" - 11) "Richard Stallman" - 12) "1953" - 13) "Sophie Wilson" - 14) "1957" - 15) "Yukihiro Matsumoto" - 16) "1965" - 17) "Linus Torvalds" - 18) "1969" - -### Operating on ranges - -Sorted sets are more powerful than this. They can operate on ranges. -Let's get all the individuals that were born up to 1950 inclusive. We -use the `ZRANGEBYSCORE` command to do it: - - > zrangebyscore hackers -inf 1950 - 1) "Alan Turing" - 2) "Hedy Lamarr" - 3) "Claude Shannon" - 4) "Alan Kay" - 5) "Anita Borg" - -We asked Redis to return all the elements with a score between negative -infinity and 1950 (both extremes are included). - -It's also possible to remove ranges of elements. 
Let's remove all -the hackers born between 1940 and 1960 from the sorted set: - - > zremrangebyscore hackers 1940 1960 - (integer) 4 - -`ZREMRANGEBYSCORE` is perhaps not the best command name, -but it can be very useful, and returns the number of removed elements. - -Another extremely useful operation defined for sorted set elements -is the get-rank operation. It is possible to ask what is the -position of an element in the set of the ordered elements. - - > zrank hackers "Anita Borg" - (integer) 4 - -The `ZREVRANK` command is also available in order to get the rank, considering -the elements sorted a descending way. - -### Lexicographical scores - -With recent versions of Redis 2.8, a new feature was introduced that allows -getting ranges lexicographically, assuming elements in a sorted set are all -inserted with the same identical score (elements are compared with the C -`memcmp` function, so it is guaranteed that there is no collation, and every -Redis instance will reply with the same output). - -The main commands to operate with lexicographical ranges are `ZRANGEBYLEX`, -`ZREVRANGEBYLEX`, `ZREMRANGEBYLEX` and `ZLEXCOUNT`. 
- -For example, let's add again our list of famous hackers, but this time -use a score of zero for all the elements: - - > zadd hackers 0 "Alan Kay" 0 "Sophie Wilson" 0 "Richard Stallman" 0 - "Anita Borg" 0 "Yukihiro Matsumoto" 0 "Hedy Lamarr" 0 "Claude Shannon" - 0 "Linus Torvalds" 0 "Alan Turing" - -Because of the sorted sets ordering rules, they are already sorted -lexicographically: - - > zrange hackers 0 -1 - 1) "Alan Kay" - 2) "Alan Turing" - 3) "Anita Borg" - 4) "Claude Shannon" - 5) "Hedy Lamarr" - 6) "Linus Torvalds" - 7) "Richard Stallman" - 8) "Sophie Wilson" - 9) "Yukihiro Matsumoto" - -Using `ZRANGEBYLEX` we can ask for lexicographical ranges: - - > zrangebylex hackers [B [P - 1) "Claude Shannon" - 2) "Hedy Lamarr" - 3) "Linus Torvalds" - -Ranges can be inclusive or exclusive (depending on the first character), -also string infinite and minus infinite are specified respectively with -the `+` and `-` strings. See the documentation for more information. - -This feature is important because it allows us to use sorted sets as a generic -index. For example, if you want to index elements by a 128-bit unsigned -integer argument, all you need to do is to add elements into a sorted -set with the same score (for example 0) but with a 16 byte prefix -consisting of **the 128 bit number in big endian**. Since numbers in big -endian, when ordered lexicographically (in raw bytes order) are actually -ordered numerically as well, you can ask for ranges in the 128 bit space, -and get the element's value discarding the prefix. - -If you want to see the feature in the context of a more serious demo, -check the [Redis autocomplete demo](http://autocomplete.redis.io). - -Updating the score: leader boards ---- - -Just a final note about sorted sets before switching to the next topic. -Sorted sets' scores can be updated at any time. Just calling `ZADD` against -an element already included in the sorted set will update its score -(and position) with O(log(N)) time complexity. 
As such, sorted sets are suitable -when there are tons of updates. - -Because of this characteristic a common use case is leader boards. -The typical application is a Facebook game where you combine the ability to -take users sorted by their high score, plus the get-rank operation, in order -to show the top-N users, and the user rank in the leader board (e.g., "you are -the #4932 best score here"). - - -## Bitmaps - -Bitmaps are not an actual data type, but a set of bit-oriented operations -defined on the String type. Since strings are binary safe blobs and their -maximum length is 512 MB, they are suitable to set up to 2^32 different -bits. - -Bit operations are divided into two groups: constant-time single bit -operations, like setting a bit to 1 or 0, or getting its value, and -operations on groups of bits, for example counting the number of set -bits in a given range of bits (e.g., population counting). - -One of the biggest advantages of bitmaps is that they often provide -extreme space savings when storing information. For example in a system -where different users are represented by incremental user IDs, it is possible -to remember a single bit information (for example, knowing whether -a user wants to receive a newsletter) of 4 billion of users using just 512 MB of memory. - -Bits are set and retrieved using the `SETBIT` and `GETBIT` commands: - - > setbit key 10 1 - (integer) 1 - > getbit key 10 - (integer) 1 - > getbit key 11 - (integer) 0 - -The `SETBIT` command takes as its first argument the bit number, and as its second -argument the value to set the bit to, which is 1 or 0. The command -automatically enlarges the string if the addressed bit is outside the -current string length. - -`GETBIT` just returns the value of the bit at the specified index. -Out of range bits (addressing a bit that is outside the length of the string -stored into the target key) are always considered to be zero. - -There are three commands operating on group of bits: - -1. 
`BITOP` performs bit-wise operations between different strings. The provided operations are AND, OR, XOR and NOT. -2. `BITCOUNT` performs population counting, reporting the number of bits set to 1. -3. `BITPOS` finds the first bit having the specified value of 0 or 1. - -Both `BITPOS` and `BITCOUNT` are able to operate with byte ranges of the -string, instead of running for the whole length of the string. The following -is a trivial example of `BITCOUNT` call: - - > setbit key 0 1 - (integer) 0 - > setbit key 100 1 - (integer) 0 - > bitcount key - (integer) 2 - -Common use cases for bitmaps are: - -* Real time analytics of all kinds. -* Storing space efficient but high performance boolean information associated with object IDs. - -For example imagine you want to know the longest streak of daily visits of -your web site users. You start counting days starting from zero, that is the -day you made your web site public, and set a bit with `SETBIT` every time -the user visits the web site. As a bit index you simply take the current unix -time, subtract the initial offset, and divide by the number of seconds in a day -(normally, 3600\*24). - -This way for each user you have a small string containing the visit -information for each day. With `BITCOUNT` it is possible to easily get -the number of days a given user visited the web site, while with -a few `BITPOS` calls, or simply fetching and analyzing the bitmap client-side, -it is possible to easily compute the longest streak. - -Bitmaps are trivial to split into multiple keys, for example for -the sake of sharding the data set and because in general it is better to -avoid working with huge keys. To split a bitmap across different keys -instead of setting all the bits into a key, a trivial strategy is just -to store M bits per key and obtain the key name with `bit-number/M` and -the Nth bit to address inside the key with `bit-number MOD M`. 
- - -## HyperLogLogs - -A HyperLogLog is a probabilistic data structure used in order to count -unique things (technically this is referred to estimating the cardinality -of a set). Usually counting unique items requires using an amount of memory -proportional to the number of items you want to count, because you need -to remember the elements you have already seen in the past in order to avoid -counting them multiple times. However there is a set of algorithms that trade -memory for precision: you end with an estimated measure with a standard error, -which in the case of the Redis implementation is less than 1%. The -magic of this algorithm is that you no longer need to use an amount of memory -proportional to the number of items counted, and instead can use a -constant amount of memory! 12k bytes in the worst case, or a lot less if your -HyperLogLog (We'll just call them HLL from now) has seen very few elements. - -HLLs in Redis, while technically a different data structure, are encoded -as a Redis string, so you can call `GET` to serialize a HLL, and `SET` -to deserialize it back to the server. - -Conceptually the HLL API is like using Sets to do the same task. You would -`SADD` every observed element into a set, and would use `SCARD` to check the -number of elements inside the set, which are unique since `SADD` will not -re-add an existing element. - -While you don't really *add items* into an HLL, because the data structure -only contains a state that does not include actual elements, the API is the -same: - -* Every time you see a new element, you add it to the count with `PFADD`. -* Every time you want to retrieve the current approximation of the unique elements *added* with `PFADD` so far, you use the `PFCOUNT`. - - > pfadd hll a b c d - (integer) 1 - > pfcount hll - (integer) 4 - -An example of use case for this data structure is counting unique queries -performed by users in a search form every day. 
- -Redis is also able to perform the union of HLLs, please check the -[full documentation](/commands#hyperloglog) for more information. - -## Other notable features - -There are other important things in the Redis API that can't be explored -in the context of this document, but are worth your attention: - -* It is possible to [iterate the key space of a large collection incrementally](/commands/scan). -* It is possible to run [Lua scripts server side](/commands/eval) to improve latency and bandwidth. -* Redis is also a [Pub-Sub server](/topics/pubsub). - -## Learn more - -This tutorial is in no way complete and has covered just the basics of the API. -Read the [command reference](/commands) to discover a lot more. - -Thanks for reading, and have fun hacking with Redis! diff --git a/docs/manual/keyspace-notifications.md b/docs/manual/keyspace-notifications.md index bd0dce4680..d46a750999 100644 --- a/docs/manual/keyspace-notifications.md +++ b/docs/manual/keyspace-notifications.md @@ -1,7 +1,7 @@ --- title: "Redis keyspace notifications" linkTitle: "Keyspace notifications" -weight: 1 +weight: 4 description: > Monitor changes to Redis keys and values in real time aliases: @@ -74,7 +74,7 @@ following table: e Evicted events (events generated when a key is evicted for maxmemory) m Key miss events (events generated when a key that doesn't exist is accessed) n New key events (Note: not included in the 'A' class) - A Alias for "g$lshztxed", so that the "AKE" string means all the events except "m". + A Alias for "g$lshztxed", so that the "AKE" string means all the events except "m" and "n". At least `K` or `E` should be present in the string, otherwise no event will be delivered regardless of the rest of the string. 
diff --git a/docs/manual/keyspace.md b/docs/manual/keyspace.md new file mode 100644 index 0000000000..f6f443f64d --- /dev/null +++ b/docs/manual/keyspace.md @@ -0,0 +1,144 @@ +--- +title: "Keyspace" +linkTitle: "Keyspace" +weight: 1 +description: > + Managing keys in Redis: Key expiration, scanning, altering and querying the key space +aliases: + - /docs/manual/the-redis-keyspace +--- + +Redis keys are binary safe; this means that you can use any binary sequence as a +key, from a string like "foo" to the content of a JPEG file. +The empty string is also a valid key. + +A few other rules about keys: + +* Very long keys are not a good idea. For instance a key of 1024 bytes is a bad + idea not only memory-wise, but also because the lookup of the key in the + dataset may require several costly key-comparisons. Even when the task at hand + is to match the existence of a large value, hashing it (for example + with SHA1) is a better idea, especially from the perspective of memory + and bandwidth. +* Very short keys are often not a good idea. There is little point in writing + "u1000flw" as a key if you can instead write "user:1000:followers". The latter + is more readable and the added space is minor compared to the space used by + the key object itself and the value object. While short keys will obviously + consume a bit less memory, your job is to find the right balance. +* Try to stick with a schema. For instance "object-type:id" is a good + idea, as in "user:1000". Dots or dashes are often used for multi-word + fields, as in "comment:4321:reply.to" or "comment:4321:reply-to". +* The maximum allowed key size is 512 MB. + +## Altering and querying the key space + +There are commands that are not defined on particular types, but are useful +in order to interact with the space of keys, and thus, can be used with +keys of any type. 
+ +For example the `EXISTS` command returns 1 or 0 to signal if a given key +exists or not in the database, while the `DEL` command deletes a key +and associated value, whatever the value is. + + > set mykey hello + OK + > exists mykey + (integer) 1 + > del mykey + (integer) 1 + > exists mykey + (integer) 0 + +From the examples you can also see how `DEL` itself returns 1 or 0 depending on whether +the key was removed (it existed) or not (there was no such key with that +name). + +There are many key space related commands, but the above two are the +essential ones together with the `TYPE` command, which returns the kind +of value stored at the specified key: + + > set mykey x + OK + > type mykey + string + > del mykey + (integer) 1 + > type mykey + none + +## Key expiration + +Before moving on, we should look at an important Redis feature that works regardless of the type of value you're storing: key expiration. Key expiration lets you set a timeout for a key, also known as a "time to live", or "TTL". When the time to live elapses, the key is automatically destroyed. + +A few important notes about key expiration: + +* They can be set using either seconds or milliseconds precision. +* However the expire time resolution is always 1 millisecond. +* Information about expires is replicated and persisted on disk; the time virtually passes even when your Redis server is stopped (this means that Redis saves the date at which a key will expire). + +Use the `EXPIRE` command to set a key's expiration: + + > set key some-value + OK + > expire key 5 + (integer) 1 + > get key (immediately) + "some-value" + > get key (after some time) + (nil) + +The key vanished between the two `GET` calls, since the second call was +delayed more than 5 seconds.
In the example above we used `EXPIRE` in +order to set the expire (it can also be used in order to set a different +expire to a key already having one, like `PERSIST` can be used in order +to remove the expire and make the key persistent forever). However we +can also create keys with expires using other Redis commands. For example +using `SET` options: + + > set key 100 ex 10 + OK + > ttl key + (integer) 9 + +The example above sets a key with the string value `100`, having an expire +of ten seconds. Later the `TTL` command is called in order to check the +remaining time to live for the key. + +In order to set and check expires in milliseconds, check the `PEXPIRE` and +the `PTTL` commands, and the full list of `SET` options. + +## Navigating the keyspace + +### Scan +To incrementally iterate over the keys in a Redis database in an efficient manner, you can use the `SCAN` command. + +Since `SCAN` allows for incremental iteration, returning only a small number of elements per call, it can be used in production without the downside of commands like `KEYS` or `SMEMBERS` that may block the server for a long time (even several seconds) when called against big collections of keys or elements. + +However, while blocking commands like `SMEMBERS` are able to provide all the elements that are part of a Set in a given moment, +the `SCAN` family of commands only offers limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. + +### Keys + +Another way to iterate over the keyspace is to use the `KEYS` command, but this approach should be used with care, since `KEYS` will block the Redis server until all keys are returned. + +**Warning**: consider `KEYS` as a command that should only be used in production +environments with extreme care. + +`KEYS` may ruin performance when it is executed against large databases. 
+This command is intended for debugging and special operations, such as changing +your keyspace layout. +Don't use `KEYS` in your regular application code. +If you're looking for a way to find keys in a subset of your keyspace, consider +using `SCAN` or [sets][tdts]. + +[tdts]: /topics/data-types#sets + +Supported glob-style patterns: + +* `h?llo` matches `hello`, `hallo` and `hxllo` +* `h*llo` matches `hllo` and `heeeello` +* `h[ae]llo` matches `hello` and `hallo,` but not `hillo` +* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` +* `h[a-b]llo` matches `hallo` and `hbllo` + +Use `\` to escape special characters if you want to match them verbatim. diff --git a/docs/reference/patterns/_index.md b/docs/manual/patterns/_index.md similarity index 81% rename from docs/reference/patterns/_index.md rename to docs/manual/patterns/_index.md index 0802b5e6f2..3a3246f713 100644 --- a/docs/reference/patterns/_index.md +++ b/docs/manual/patterns/_index.md @@ -2,7 +2,10 @@ title: "Redis programming patterns" linkTitle: "Patterns" description: Novel patterns for working with Redis data structures -weight: 1 +weight: 6 +aliases: [ + /docs/reference/patterns +] --- The following documents describe some novel development patterns you can use with Redis. diff --git a/docs/reference/patterns/bulk-loading.md b/docs/manual/patterns/bulk-loading.md similarity index 98% rename from docs/reference/patterns/bulk-loading.md rename to docs/manual/patterns/bulk-loading.md index f09cef74fe..8676291f2e 100644 --- a/docs/reference/patterns/bulk-loading.md +++ b/docs/manual/patterns/bulk-loading.md @@ -4,8 +4,10 @@ linkTitle: "Bulk loading" weight: 1 description: > Writing data in bulk using the Redis protocol -aliases: - - /topics/mass-insertion +aliases: [ + /topics/mass-insertion, + /docs/reference/patterns/bulk-loading +] --- Bulk loading is the process of loading Redis with a large amount of pre-existing data. Ideally, you want to perform this operation quickly and efficiently. 
This document describes some strategies for bulk loading data in Redis. diff --git a/docs/reference/patterns/distributed-locks.md b/docs/manual/patterns/distributed-locks.md similarity index 93% rename from docs/reference/patterns/distributed-locks.md rename to docs/manual/patterns/distributed-locks.md index 634f1d6065..e4169f8970 100644 --- a/docs/reference/patterns/distributed-locks.md +++ b/docs/manual/patterns/distributed-locks.md @@ -3,9 +3,12 @@ title: "Distributed Locks with Redis" linkTitle: "Distributed locks" weight: 1 description: > - A Distributed Lock Pattern with Redis -aliases: - - /topics/distlock + A distributed lock pattern with Redis +aliases: [ + /topics/distlock, + /docs/reference/patterns/distributed-locks, + /docs/reference/patterns/distributed-locks.md +] --- Distributed locks are a very useful primitive in many environments where different processes must operate with shared resources in a mutually @@ -40,12 +43,13 @@ already available that can be used for reference. * [Redisson](https://github.com/mrniko/redisson) (Java implementation). * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). * [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). +* [Redis-plus-plus](https://github.com/sewenew/redis-plus-plus/#redlock) (C++ implementation). * [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). * [RedLock.net](https://github.com/samcook/RedLock.net) (C#/.NET implementation). Includes async and lock extension support. -* [ScarletLock](https://github.com/psibernetic/scarletlock) (C# .NET implementation with configurable datastore). * [Redlock4Net](https://github.com/LiZhenNet/Redlock4Net) (C# .NET implementation). * [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). 
Includes support for lock extension. * [Deno DLM](https://github.com/oslabs-beta/Deno-Redlock) (Deno implementation) +* [Rslock](https://github.com/hexcowboy/rslock) (Rust implementation). Includes async and lock extension support. ## Safety and Liveness Guarantees @@ -206,6 +210,19 @@ However this does not technically change the algorithm, so the maximum number of lock reacquisition attempts should be limited, otherwise one of the liveness properties is violated. +### Disclaimer about consistency + +Please consider thoroughly reviewing the [Analysis of Redlock](#analysis-of-redlock) section at the end of this page. +Martin Kleppmann's article and antirez's answer to it are very relevant. +If you are concerned about consistency and correctness, you should pay attention to the following topics: + +1. You should implement fencing tokens. + This is especially important for processes that can take significant time and applies to any distributed locking system. + Extending locks' lifetime is also an option, but don't assume that a lock is retained as long as the process that had acquired it is alive. +2. Redis is not using a monotonic clock for the TTL expiration mechanism. + That means that a wall-clock shift may result in a lock being acquired by more than one process. + Even though the problem can be mitigated by preventing admins from manually setting the server's time and setting up NTP properly, there's still a chance of this issue occurring in real life and compromising consistency. + ## Want to help? If you are into distributed systems, it would be great to have your opinion / analysis. Also reference implementations in other languages could be great. 
diff --git a/docs/images/2idx_0.png b/docs/manual/patterns/indexes/2idx_0.png similarity index 100% rename from docs/images/2idx_0.png rename to docs/manual/patterns/indexes/2idx_0.png diff --git a/docs/images/2idx_1.png b/docs/manual/patterns/indexes/2idx_1.png similarity index 100% rename from docs/images/2idx_1.png rename to docs/manual/patterns/indexes/2idx_1.png diff --git a/docs/images/2idx_2.png b/docs/manual/patterns/indexes/2idx_2.png similarity index 100% rename from docs/images/2idx_2.png rename to docs/manual/patterns/indexes/2idx_2.png diff --git a/docs/reference/patterns/indexes.md b/docs/manual/patterns/indexes/index.md similarity index 95% rename from docs/reference/patterns/indexes.md rename to docs/manual/patterns/indexes/index.md index cffd413220..c7aa7ee1b2 100644 --- a/docs/reference/patterns/indexes.md +++ b/docs/manual/patterns/indexes/index.md @@ -4,8 +4,10 @@ linkTitle: Secondary indexing weight: 1 description: > Building secondary indexes in Redis -aliases: - - /topics/indexing +aliases: [ + /topics/indexing, + /docs/reference/patterns/indexes +] --- Redis is not exactly a key-value store, since values can be complex data structures. However it has an external key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. @@ -35,7 +37,7 @@ vanilla sorted sets are limited to things where the indexing field is a number within a given range. The two commands to build these kind of indexes are `ZADD` and -`ZRANGEBYSCORE` to respectively add items and retrieve items within a +`ZRANGE` with the `BYSCORE` argument to respectively add items and retrieve items within a specified range. 
For instance, it is possible to index a set of person names by their @@ -50,11 +52,11 @@ person and the score will be the age. In order to retrieve all persons with an age between 20 and 40, the following command can be used: - ZRANGEBYSCORE myindex 20 40 + ZRANGE myindex 20 40 BYSCORE 1) "Manuel" 2) "Jon" -By using the **WITHSCORES** option of `ZRANGEBYSCORE` it is also possible +By using the **WITHSCORES** option of `ZRANGE` it is also possible to obtain the scores associated with the returned elements. The `ZCOUNT` command can be used in order to retrieve the number of elements @@ -62,10 +64,10 @@ within a given range, without actually fetching the elements, which is also useful, especially given the fact the operation is executed in logarithmic time regardless of the size of the range. -Ranges can be inclusive or exclusive, please refer to the `ZRANGEBYSCORE` +Ranges can be inclusive or exclusive, please refer to the `ZRANGE` command documentation for more information. -**Note**: Using the `ZREVRANGEBYSCORE` it is possible to query a range in +**Note**: Using the `ZRANGE` with the `BYSCORE` and `REV` arguments, it is possible to query a range in reversed order, which is often useful when data is indexed in a given direction (ascending or descending) but we want to retrieve information the other way around. @@ -93,7 +95,7 @@ could do: ZADD user.age.index 33 3 This time the value associated with the score in the sorted set is the -ID of the object. So once I query the index with `ZRANGEBYSCORE` I'll +ID of the object. So once I query the index with `ZRANGE` with the `BYSCORE` argument, I'll also have to retrieve the information I need with `HGETALL` or similar commands. The obvious advantage is that objects can change without touching the index, as long as we don't change the indexed field. @@ -170,7 +172,7 @@ the second is checked and so forth. 
If the common prefix of two strings is the same then the longer string is considered the greater of the two, so "foobar" is greater than "foo". -There are commands such as `ZRANGEBYLEX` and `ZLEXCOUNT` that +There are commands such as `ZRANGE` and `ZLEXCOUNT` that are able to query and count ranges in a lexicographically fashion, assuming they are used with sorted sets where all the elements have the same score. @@ -198,9 +200,9 @@ are ordered lexicographically. 3) "baaa" 4) "bbbb" -Now we can use `ZRANGEBYLEX` in order to perform range queries. +Now we can use `ZRANGE` with the `BYLEX` argument in order to perform range queries. - ZRANGEBYLEX myindex [a (b + ZRANGE myindex [a (b BYLEX 1) "aaaa" 2) "abbb" @@ -214,7 +216,7 @@ which are all the elements starting with `a`. There are also two more special characters indicating the infinitely negative string and the infinitely positive string, which are `-` and `+`. - ZRANGEBYLEX myindex [b + + ZRANGE myindex [b + BYLEX 1) "baaa" 2) "bbbb" @@ -235,12 +237,12 @@ we'll just do: ZADD myindex 0 banana And so forth for each search query ever encountered. Then when we want to -complete the user input, we execute a range query using `ZRANGEBYLEX`. +complete the user input, we execute a range query using `ZRANGE` with the `BYLEX` argument. Imagine the user is typing "bit" inside the search form, and we want to offer possible search keywords starting for "bit". We send Redis a command like that: - ZRANGEBYLEX myindex "[bit" "[bit\xff" + ZRANGE myindex "[bit" "[bit\xff" BYLEX Basically we create a range using the string the user is typing right now as start, and the same string plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. This way we get all the strings that start for the string the user is typing. 
@@ -269,7 +271,7 @@ We also need logic in order to increment the index if the search term already exists in the index, so what we'll actually do is something like that: - ZRANGEBYLEX myindex "[banana:" + LIMIT 0 1 + ZRANGE myindex "[banana:" + BYLEX LIMIT 0 1 1) "banana:1" This will return the single entry of `banana` if it exists. Then we @@ -291,7 +293,7 @@ There is more: our goal is to just have items searched very frequently. So we need some form of purging. When we actually query the index in order to complete the user input, we may see something like that: - ZRANGEBYLEX myindex "[banana:" + LIMIT 0 10 + ZRANGE myindex "[banana:" + BYLEX LIMIT 0 10 1) "banana:123" 2) "banaooo:1" 3) "banned user:49" @@ -355,7 +357,7 @@ we just store the entry as `key:value`: And search for the key with: - ZRANGEBYLEX myindex [mykey: + LIMIT 0 1 + ZRANGE myindex [mykey: + BYLEX LIMIT 0 1 1) "mykey:myvalue" Then we extract the part after the colon to retrieve the value. @@ -436,7 +438,7 @@ With an index like that, to get all the products in room 56 having a price between 10 and 30 dollars is very easy. We can just run the following command: - ZRANGEBYLEX myindex [0056:0010.00 [0056:0030.00 + ZRANGE myindex [0056:0010.00 [0056:0030.00 BYLEX The above is called a composed index. Its effectiveness depends on the order of the fields and the queries I want to run. For example the above @@ -505,7 +507,7 @@ Now things start to be interesting, and I can query the graph in many different ways. For example, who are all the people `antirez` *is friend of*? - ZRANGEBYLEX myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" + ZRANGE myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" BYLEX 1) "spo:antirez:is-friend-of:matteocollina" 2) "spo:antirez:is-friend-of:wonderwoman" 3) "spo:antirez:is-friend-of:spiderman" @@ -513,7 +515,7 @@ different ways. 
For example, who are all the people `antirez` Or, what are all the relationships `antirez` and `matteocollina` have where the first is the subject and the second is the object? - ZRANGEBYLEX myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" + ZRANGE myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" BYLEX 1) "sop:antirez:matteocollina:is-friend-of" 2) "sop:antirez:matteocollina:was-at-conference-with" 3) "sop:antirez:matteocollina:talked-with" @@ -551,7 +553,7 @@ Let's say we have points in the space, which represent our data samples, where ` In the next figure, the blue box represents our query. We want all the points where `x` is between 50 and 100, and where `y` is between 100 and 300. -![Points in the space](../../../images/2idx_0.png) +![Points in the space](2idx_0.png) In order to represent data that makes these kinds of queries fast to perform, we start by padding our numbers with 0. So for example imagine we want to @@ -589,7 +591,7 @@ What this maps to is to a square representing all values where the `x` variable is between 70 and 79, and the `y` variable is between 200 and 209. To identify this specific area, we can write random points in that interval. -![Small area](../../../images/2idx_1.png) +![Small area](2idx_1.png) So the above lexicographic query allows us to easily query for points in a specific square in the picture. However the square may be too small for @@ -604,7 +606,7 @@ This time the range represents all the points where `x` is between 0 and 99 and `y` is between 200 and 299. Drawing random points in this interval shows us this larger area. -![Large area](../../../images/2idx_2.png) +![Large area](2idx_2.png) So now our area is too big for our query, and still our search box is not completely included. 
We need more granularity, but we can easily obtain @@ -625,7 +627,7 @@ So by interleaving digits, our representation in the index would be: Let's see what are our ranges as we substitute the last 2, 4, 6, 8, ... bits with 0s ad 1s in the interleaved representation: - 2 bits: x between 70 and 75, y between 200 and 201 (range=2) + 2 bits: x between 74 and 75, y between 200 and 201 (range=2) 4 bits: x between 72 and 75, y between 200 and 203 (range=4) 6 bits: x between 72 and 79, y between 200 and 207 (range=8) 8 bits: x between 64 and 79, y between 192 and 207 (range=16) @@ -672,7 +674,7 @@ Turning this into code is simple. Here is a Ruby example: y_range_end = y_range_start | ((2**exp)-1) puts "#{x},#{y} x from #{x_range_start} to #{x_range_end}, y from #{y_range_start} to #{y_range_end}" - # Turn it into interleaved form for ZRANGEBYLEX query. + # Turn it into interleaved form for ZRANGE query. # We assume we need 9 bits for each integer, so the final # interleaved representation will be 18 bits. xbin = x_range_start.to_s(2).rjust(9,'0') @@ -681,7 +683,7 @@ Turning this into code is simple. Here is a Ruby example: # Now that we have the start of the range, calculate the end # by replacing the specified number of bits from 0 to 1. 
e = s[0..-(bits+1)]+("1"*bits) - puts "ZRANGEBYLEX myindex [#{s} [#{e}" + puts "ZRANGE myindex [#{s} [#{e} BYLEX" } } end diff --git a/docs/reference/patterns/twitter-clone.md b/docs/manual/patterns/twitter-clone.md similarity index 96% rename from docs/reference/patterns/twitter-clone.md rename to docs/manual/patterns/twitter-clone.md index 38a01059cd..b7d37e5792 100644 --- a/docs/reference/patterns/twitter-clone.md +++ b/docs/manual/patterns/twitter-clone.md @@ -3,6 +3,9 @@ title: "Redis patterns example" linkTitle: "Patterns example" description: Learn several Redis patterns by building a Twitter clone weight: 20 +aliases: [ + /docs/reference/patterns/twitter-clone +] --- This article describes the design and implementation of a [very simple Twitter clone](https://github.com/antirez/retwis) written using PHP with Redis as the only database. The programming community has traditionally considered key-value stores as a special purpose database that couldn't be used as a drop-in replacement for a relational database for the development of web applications. This article will try to show that Redis data structures on top of a key-value layer are an effective data model to implement many kinds of applications. @@ -16,7 +19,7 @@ data layout using Redis, and how to apply different data structures. Our Twitter clone, called [Retwis](https://github.com/antirez/retwis), is structurally simple, has very good performance, and can be distributed among any number of web and Redis servers with little efforts. [View the Retwis source code](https://github.com/antirez/retwis). -I used PHP for the example since it can be read by everybody. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. +I used PHP for the example because of its universal readability. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. 
A few clones exist (however not all the clones use the same data layout as the current version of this tutorial, so please, stick with the official PHP implementation for the sake of following the article better). @@ -149,7 +152,7 @@ The Hash data type --- This is the last data structure we use in our program, and is extremely easy -to gasp since there is an equivalent in almost every programming language out +to grasp since there is an equivalent in almost every programming language out there: Hashes. Redis Hashes are basically like Ruby or Python hashes, a collection of fields associated with values: @@ -214,7 +217,7 @@ We can add new followers with: ZADD followers:1000 1401267618 1234 => Add user 1234 with time 1401267618 -Another important thing we need is a place were we can add the updates to display in the user's home page. We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be `LPUSH`ed in the user updates key, and thanks to `LRANGE`, we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. +Another important thing we need is a place where we can add the updates to display in the user's home page. We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be `LPUSH`ed in the user updates key, and thanks to `LRANGE`, we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. posts:1000 => a List of post ids - every new post is LPUSHed here. 
diff --git a/docs/manual/pipelining.md b/docs/manual/pipelining/index.md similarity index 99% rename from docs/manual/pipelining.md rename to docs/manual/pipelining/index.md index e1653925b7..177681afb8 100644 --- a/docs/manual/pipelining.md +++ b/docs/manual/pipelining/index.md @@ -1,7 +1,7 @@ --- title: "Redis pipelining" linkTitle: "Pipelining" -weight: 1 +weight: 2 description: How to optimize round-trip times by batching Redis commands aliases: - /topics/pipelining @@ -92,7 +92,7 @@ call. Consequently, the number of total queries performed per second initially increases almost linearly with longer pipelines, and eventually reaches 10 times the baseline obtained without pipelining, as shown in this figure. -![Pipeline size and IOPs](../../images/pipeline_iops.png) +![Pipeline size and IOPs](pipeline_iops.png) ## A real world code example diff --git a/docs/images/pipeline_iops.png b/docs/manual/pipelining/pipeline_iops.png similarity index 100% rename from docs/images/pipeline_iops.png rename to docs/manual/pipelining/pipeline_iops.png diff --git a/docs/manual/pubsub.md b/docs/manual/pubsub.md deleted file mode 100644 index fac9e917a4..0000000000 --- a/docs/manual/pubsub.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Redis Pub/Sub -linkTitle: "Pub/sub" -weight: 1 -description: How to use pub/sub channels in Redis -aliases: - - /topics/pubsub - - /docs/manual/pub-sub ---- - -`SUBSCRIBE`, `UNSUBSCRIBE` and `PUBLISH` -implement the [Publish/Subscribe messaging -paradigm](http://en.wikipedia.org/wiki/Publish/subscribe) where -(citing Wikipedia) senders (publishers) are not programmed to send -their messages to specific receivers (subscribers). Rather, published -messages are characterized into channels, without knowledge of what (if -any) subscribers there may be. Subscribers express interest in one or -more channels, and only receive messages that are of interest, without -knowledge of what (if any) publishers there are. 
This decoupling of -publishers and subscribers can allow for greater scalability and a more -dynamic network topology. - -For instance in order to subscribe to channels `foo` and `bar` the -client issues a `SUBSCRIBE` providing the names of the channels: - -```bash -SUBSCRIBE foo bar -``` - -Messages sent by other clients to these channels will be pushed by Redis -to all the subscribed clients. - -A client subscribed to one or more channels should not issue commands, -although it can subscribe and unsubscribe to and from other channels. -The replies to subscription and unsubscribing operations are sent in -the form of messages, so that the client can just read a coherent -stream of messages where the first element indicates the type of -message. The commands that are allowed in the context of a subscribed -client are `SUBSCRIBE`, `SSUBSCRIBE`, `SUNSUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, `PUNSUBSCRIBE`, `PING`, `RESET`, and `QUIT`. - -Please note that `redis-cli` will not accept any commands once in -subscribed mode and can only quit the mode with `Ctrl-C`. - -## Format of pushed messages - -A message is an [array-reply](/topics/protocol#array-reply) with three elements. - -The first element is the kind of message: - -* `subscribe`: means that we successfully subscribed to the channel -given as the second element in the reply. The third argument represents -the number of channels we are currently subscribed to. - -* `unsubscribe`: means that we successfully unsubscribed from the -channel given as second element in the reply. The third argument -represents the number of channels we are currently subscribed to. When -the last argument is zero, we are no longer subscribed to any channel, -and the client can issue any kind of Redis command as we are outside the -Pub/Sub state. - -* `message`: it is a message received as result of a `PUBLISH` command -issued by another client. 
The second element is the name of the -originating channel, and the third argument is the actual message -payload. - -## Database & Scoping - -Pub/Sub has no relation to the key space. -It was made to not interfere with it on any level, including database numbers. - -Publishing on db 10, will be heard by a subscriber on db 1. - -If you need scoping of some kind, prefix the channels with the name of the -environment (test, staging, production...). - -## Wire protocol example - -``` -SUBSCRIBE first second -*3 -$9 -subscribe -$5 -first -:1 -*3 -$9 -subscribe -$6 -second -:2 -``` - -At this point, from another client we issue a `PUBLISH` operation -against the channel named `second`: - -``` -> PUBLISH second Hello -``` - -This is what the first client receives: - -``` -*3 -$7 -message -$6 -second -$5 -Hello -``` - -Now the client unsubscribes itself from all the channels using the -`UNSUBSCRIBE` command without additional arguments: - -``` -UNSUBSCRIBE -*3 -$11 -unsubscribe -$6 -second -:1 -*3 -$11 -unsubscribe -$5 -first -:0 -``` - -## Pattern-matching subscriptions - -The Redis Pub/Sub implementation supports pattern matching. Clients may -subscribe to glob-style patterns in order to receive all the messages -sent to channel names matching a given pattern. - -For instance: - -``` -PSUBSCRIBE news.* -``` - -Will receive all the messages sent to the channel `news.art.figurative`, -`news.music.jazz`, etc. -All the glob-style patterns are valid, so multiple wildcards are supported. - -``` -PUNSUBSCRIBE news.* -``` - -Will then unsubscribe the client from that pattern. -No other subscriptions will be affected by this call. - -Messages received as a result of pattern matching are sent in a -different format: - -* The type of the message is `pmessage`: it is a message received -as result of a `PUBLISH` command issued by another client, matching -a pattern-matching subscription. 
The second element is the original -pattern matched, the third element is the name of the originating -channel, and the last element the actual message payload. - -Similarly to `SUBSCRIBE` and `UNSUBSCRIBE`, `PSUBSCRIBE` and -`PUNSUBSCRIBE` commands are acknowledged by the system sending a message -of type `psubscribe` and `punsubscribe` using the same format as the -`subscribe` and `unsubscribe` message format. - -## Messages matching both a pattern and a channel subscription - -A client may receive a single message multiple times if it's subscribed -to multiple patterns matching a published message, or if it is -subscribed to both patterns and channels matching the message. Like in -the following example: - -``` -SUBSCRIBE foo -PSUBSCRIBE f* -``` - -In the above example, if a message is sent to channel `foo`, the client -will receive two messages: one of type `message` and one of type -`pmessage`. - -## The meaning of the subscription count with pattern matching - -In `subscribe`, `unsubscribe`, `psubscribe` and `punsubscribe` -message types, the last argument is the count of subscriptions still -active. This number is actually the total number of channels and -patterns the client is still subscribed to. So the client will exit -the Pub/Sub state only when this count drops to zero as a result of -unsubscribing from all the channels and patterns. - -## Sharded Pub/Sub - -From 7.0, sharded Pub/Sub is introduced in which shard channels are assigned to slots by the same algorithm used to assign keys to slots. -A shard message must be sent to a node that own the slot the shard channel is hashed to. -The cluster makes sure the published shard messages are forwarded to all nodes in the shard, so clients can subscribe to a shard channel by connecting to either the master responsible for the slot, or to any of its replicas. -`SSUBSCRIBE`, `SUNSUBSCRIBE` and `SPUBLISH` are used to implement sharded Pub/Sub. 
- -Sharded Pub/Sub helps to scale the usage of Pub/Sub in cluster mode. -It restricts the propagation of message to be within the shard of a cluster. -Hence, the amount of data passing through the cluster bus is limited in comparison to global Pub/Sub where each message propagates to each node in the cluster. -This allows users to horizontally scale the Pub/Sub usage by adding more shards. - - -## Programming example - -Pieter Noordhuis provided a great example using EventMachine -and Redis to create [a multi user high performance web -chat](https://gist.github.com/pietern/348262). - -## Client library implementation hints - -Because all the messages received contain the original subscription -causing the message delivery (the channel in the case of message type, -and the original pattern in the case of pmessage type) client libraries -may bind the original subscription to callbacks (that can be anonymous -functions, blocks, function pointers), using a hash table. - -When a message is received an O(1) lookup can be done in order to -deliver the message to the registered callback. 
diff --git a/docs/reference/_index.md b/docs/reference/_index.md index eeed48e834..6e3c864bb6 100644 --- a/docs/reference/_index.md +++ b/docs/reference/_index.md @@ -1,6 +1,7 @@ --- title: "Redis reference" linkTitle: "Reference" -description: Specifications, patterns, internals, and optimization -weight: 20 +description: Specifications and protocols +weight: 70 + --- diff --git a/docs/reference/arm.md b/docs/reference/arm.md index 2eec312367..e0323160f3 100644 --- a/docs/reference/arm.md +++ b/docs/reference/arm.md @@ -1,7 +1,7 @@ --- title: "ARM support" linkTitle: "ARM support" -weight: 1 +weight: 11 description: > Exploring Redis on the ARM CPU Architecture aliases: diff --git a/docs/reference/clients.md b/docs/reference/clients.md index b5da8d83e9..45dbcb1608 100644 --- a/docs/reference/clients.md +++ b/docs/reference/clients.md @@ -1,7 +1,7 @@ --- title: "Redis client handling" linkTitle: "Client handling" -weight: 1 +weight: 5 description: > How the Redis server manages client connections aliases: @@ -47,8 +47,7 @@ However, Redis does the following two things when serving clients: In Redis 2.4 there was a hard-coded limit for the maximum number of clients that could be handled simultaneously. -In Redis 2.6 and newer, this limit is dynamic: by default it is set to 10000 clients, unless -otherwise stated by the `maxclients` directive in `redis.conf`. +In Redis 2.6 and newer, this limit is configurable using the `maxclients` directive in `redis.conf`. The default is 10,000 clients. However, Redis checks with the kernel what the maximum number of file descriptors that we are able to open is (the *soft limit* is checked). If the @@ -112,7 +111,7 @@ Every client is also subject to a query buffer limit. This is a non-configurable Redis is built to handle a very large number of client connections. 
Client connections tend to consume memory, and when there are many of them, the aggregate memory consumption can be extremely high, leading to data eviction or out-of-memory errors. -These cases can be mitigated to an extent using [output buffer limits](#output-buffers-limits), but Redis allows us a more robust configuration to limit the aggregate memory used by all clients' connections. +These cases can be mitigated to an extent using [output buffer limits](#output-buffer-limits), but Redis allows us a more robust configuration to limit the aggregate memory used by all clients' connections. This mechanism is called **client eviction**, and it's essentially a safety mechanism that will disconnect clients once the aggregate memory usage of all clients is above a threshold. diff --git a/docs/reference/cluster-spec.md b/docs/reference/cluster-spec.md index 4261662338..6a1eb47cc4 100644 --- a/docs/reference/cluster-spec.md +++ b/docs/reference/cluster-spec.md @@ -1,7 +1,7 @@ --- title: Redis cluster specification linkTitle: Cluster spec -weight: 1 +weight: 9 description: > Detailed specification for Redis cluster aliases: @@ -199,6 +199,23 @@ Examples: * For the key `foo{bar}{zap}` the substring `bar` will be hashed, since the algorithm stops at the first valid or invalid (without bytes inside) match of `{` and `}`. * What follows from the algorithm is that if the key starts with `{}`, it is guaranteed to be hashed as a whole. This is useful when using binary data as key names. +#### Glob-style patterns + +Commands accepting a glob-style pattern, including `KEYS`, `SCAN` and `SORT`, are optimized for patterns that imply a single slot. +This means that if all keys that can match a pattern must belong to a specific slot, only this slot is searched for keys matching the pattern. +The pattern slot optimization is introduced in Redis 8.0. 
+ +The optimization kicks in when the pattern meets the following conditions: + +* the pattern contains a hashtag, +* there are no wildcards or escape characters before the hashtag, and +* the hashtag within curly braces doesn't contain any wildcards or escape characters. + +For example, `SCAN 0 MATCH {abc}*` can successfully recognize the hashtag and scans only the slot corresponding to `abc`. +However, the patterns `*{abc}`, `{a*c}`, or `{a\*bc}` cannot recognize the hashtag, so all slots need to be scanned. + +#### Hash slot example code + Adding the hash tags exception, the following is an implementation of the `HASH_SLOT` function in Ruby and C language. Ruby example code: diff --git a/docs/reference/command-arguments.md b/docs/reference/command-arguments.md index 2c6d6d62e7..a6ff30189e 100644 --- a/docs/reference/command-arguments.md +++ b/docs/reference/command-arguments.md @@ -1,7 +1,7 @@ --- title: "Redis command arguments" linkTitle: "Command arguments" -weight: 1 +weight: 7 description: How Redis commands expose their documentation programmatically aliases: - /topics/command-arguments @@ -16,6 +16,10 @@ Every element in the _arguments_ array is a map with the following fields: * **name:** the argument's name, always present. The name of an argument is given for identification purposes alone. It isn't displayed during the command's syntax rendering. + The same name can appear more than once in the entire argument tree, but it is unique compared to other sibling arguments' names. + This allows obtaining a unique identifier for each argument (the concatenation of all names in the path from the root to any argument). +* **display_text:** the argument's display string, present in arguments that have a displayable representation (all arguments that aren't oneof/block). + This is the string used in the command's syntax rendering. * **type:** the argument's type, always present. An argument must have one of the following types: - **string:** a string argument. 
diff --git a/docs/reference/command-tips.md b/docs/reference/command-tips.md index d7ffd83e37..5a373624d7 100644 --- a/docs/reference/command-tips.md +++ b/docs/reference/command-tips.md @@ -2,7 +2,7 @@ title: "Redis command tips" linkTitle: "Command tips" weight: 1 -description: Programm +description: Get additional information about a command aliases: - /topics/command-tips --- @@ -20,21 +20,21 @@ However, the following sections describe proposed tips and demonstrate the conve This tip indicates that the command's output isn't deterministic. That means that calls to the command may yield different results with the same arguments and data. -That difference could be the result of the command's random nature (e.g., `RANDOMKEY` and `SPOP`); the call's timing (e.g. `TTL`); or generic differences that relate to the server's state (e.g. `INFO` and `CLIENT LIST`). +That difference could be the result of the command's random nature (e.g., `RANDOMKEY` and `SPOP`); the call's timing (e.g., `TTL`); or generic differences that relate to the server's state (e.g., `INFO` and `CLIENT LIST`). **Note:** -prior to Redis 7.0, this tip was the _random_ command flag. +Prior to Redis 7.0, this tip was the _random_ command flag. ## nondeterministic_output_order -The existence of this tip indicates that the command's output is deterministic, but its ordering is random (e.g. `HGETALL` and `SMEMBERS`). +The existence of this tip indicates that the command's output is deterministic, but its ordering is random (e.g., `HGETALL` and `SMEMBERS`). **Note:** -prior to Redis 7.0, this tip was the _sort_\__for_\__script_ flag. +Prior to Redis 7.0, this tip was the _sort_\__for_\__script_ flag. ## request_policy -This tip can help clients determine the shard(s) to send the command in clustering mode. +This tip can help clients determine the shards to send the command in clustering mode. The default behavior a client should implement for commands without the _request_policy_ tip is as follows: 1. 
The command doesn't accept key name arguments: the client can execute the command on an arbitrary shard. @@ -50,7 +50,9 @@ In cases where the client should adopt a behavior different than the default, th This tip is in-use by commands that don't accept key name arguments. The command operates atomically per shard. - **multi_shard:** the client should execute the command on several shards. - The shards that execute the command are determined by the hash slots of its input key name arguments. + The client should split the inputs according to the hash slots of its input key name arguments. + For example, the command `DEL {foo} {foo}1 bar` should be split to `DEL {foo} {foo}1` and `DEL bar`. + If the keys are hashed to more than a single slot, the command must be split even if all the slots are managed by the same shard. Examples for such commands include `MSET`, `MGET` and `DEL`. However, note that `SUNIONSTORE` isn't considered as _multi_shard_ because all of its keys must belong to the same hash slot. - **special:** indicates a non-trivial form of the client's request policy, such as the `SCAN` command. diff --git a/docs/manual/eviction.md b/docs/reference/eviction/index.md similarity index 95% rename from docs/manual/eviction.md rename to docs/reference/eviction/index.md index cbf1372200..0cf1b5be97 100644 --- a/docs/manual/eviction.md +++ b/docs/reference/eviction/index.md @@ -1,13 +1,12 @@ --- title: Key eviction linkTitle: Eviction -weight: 1 +weight: 6 description: Overview of Redis key eviction policies (LRU, LFU, etc.) aliases: [ /topics/lru_cache, /topics/lru_cache.md, - /manual/eviction, - /manual/eviction.md, + /docs/manual/eviction ] --- @@ -16,8 +15,7 @@ evict old data as you add new data. This behavior is well known in the developer community, since it is the default behavior for the popular *memcached* system. -This page covers the more general topic of the Redis `maxmemory` directive used to limit the memory usage to a fixed amount. 
This page it also covers in -depth the LRU eviction algorithm used by Redis, that is actually an approximation of +This page covers the more general topic of the Redis `maxmemory` directive used to limit the memory usage to a fixed amount. It also extensively covers the LRU eviction algorithm used by Redis, which is actually an approximation of the exact LRU. ## `Maxmemory` configuration directive @@ -91,7 +89,7 @@ If a command results in a lot of memory being used (like a big set intersection ## Approximated LRU algorithm Redis LRU algorithm is not an exact implementation. This means that Redis is -not able to pick the *best candidate* for eviction, that is, the access that +not able to pick the *best candidate* for eviction, that is, the key that was accessed the furthest in the past. Instead it will try to run an approximation of the LRU algorithm, by sampling a small number of keys, and evicting the one that is the best (with the oldest access time) among the sampled keys. @@ -109,7 +107,7 @@ costs more memory. However, the approximation is virtually equivalent for an application using Redis. This figure compares the LRU approximation used by Redis with true LRU. -![LRU comparison](../../images/lru_comparison.png) +![LRU comparison](lru_comparison.png) The test to generate the above graphs filled a Redis server with a given number of keys. The keys were accessed from the first to the last. The first keys are the best candidates for eviction using an LRU algorithm. Later more 50% of keys are added, in order to force half of the old keys to be evicted. @@ -160,7 +158,7 @@ By default Redis is configured to: * Saturate the counter at, around, one million requests. * Decay the counter every one minute. -Those should be reasonable values and were tested experimental, but the user may want to play with these configuration settings to pick optimal values. 
+Those should be reasonable values and were tested experimentally, but the user may want to play with these configuration settings to pick optimal values. Instructions about how to tune these parameters can be found inside the example `redis.conf` file in the source distribution. Briefly, they are: @@ -169,7 +167,7 @@ lfu-log-factor 10 lfu-decay-time 1 ``` -The decay time is the obvious one, it is the amount of minutes a counter should be decayed, when sampled and found to be older than that value. A special value of `0` means: always decay the counter every time is scanned, and is rarely useful. +The decay time is the obvious one, it is the amount of minutes a counter should be decayed, when sampled and found to be older than that value. A special value of `0` means: we will never decay the counter. The counter *logarithm factor* changes how many hits are needed to saturate the frequency counter, which is just in the range 0-255. The higher the factor, the more accesses are needed to reach the maximum. The lower the factor, the better is the resolution of the counter for low accesses, according to the following table: diff --git a/docs/images/lru_comparison.png b/docs/reference/eviction/lru_comparison.png similarity index 100% rename from docs/images/lru_comparison.png rename to docs/reference/eviction/lru_comparison.png diff --git a/docs/reference/gopher.md b/docs/reference/gopher.md index 211d65b767..f34812323c 100644 --- a/docs/reference/gopher.md +++ b/docs/reference/gopher.md @@ -1,13 +1,13 @@ --- title: "Redis and the Gopher protocol" linkTitle: "Gopher protocol" -weight: 1 +weight: 10 description: The Redis Gopher protocol implementation aliases: - /topics/gopher --- -** Note: Support for Gopher was removed is Redis 7.0 ** +** Note: Support for Gopher was removed in Redis 7.0 ** Redis contains an implementation of the Gopher protocol, as specified in the [RFC 1436](https://www.ietf.org/rfc/rfc1436.txt). 
diff --git a/docs/reference/internals/_index.md b/docs/reference/internals/_index.md index 3aea94d563..43f6409921 100644 --- a/docs/reference/internals/_index.md +++ b/docs/reference/internals/_index.md @@ -1,7 +1,7 @@ --- title: "Redis internals" linkTitle: "Internals" -weight: 1 +weight: 12 description: Documents describing internals in early Redis implementations aliases: - /topics/internals diff --git a/docs/reference/internals/internals-rediseventlib.md b/docs/reference/internals/internals-rediseventlib.md index 134817536f..d644e72cee 100644 --- a/docs/reference/internals/internals-rediseventlib.md +++ b/docs/reference/internals/internals-rediseventlib.md @@ -17,7 +17,7 @@ Let us figure it out through a series of Q&As. Q: What do you expect a network server to be doing all the time?
A: Watch for inbound connections on the port its listening and accept them. -Q: Calling [accept](http://man.cx/accept%282%29 accept) yields a descriptor. What do I do with it?
+Q: Calling [accept](http://man.cx/accept%282%29) yields a descriptor. What do I do with it?
A: Save the descriptor and do a non-blocking read/write operation on it. Q: Why does the read/write have to be non-blocking?
diff --git a/docs/reference/key-specs.md b/docs/reference/key-specs.md index 95e874f145..b911e9f42d 100644 --- a/docs/reference/key-specs.md +++ b/docs/reference/key-specs.md @@ -1,7 +1,7 @@ --- title: "Command key specifications" linkTitle: "Command key specifications" - weight: 1 + weight: 3 description: What are command key specification and how to use them in your client aliases: - /topics/key-specs @@ -25,7 +25,7 @@ Even if the client encounters an unfamiliar type of key specification, it can al That said, most cluster-aware clients only require a single key name to perform correct command routing, so it is possible that although a command features one unfamiliar specification, its other specification may still be usable by the client. -Key specifications are maps with three keys: +Key specifications are maps with the following keys: 1. **begin_search:**: the starting index for keys' extraction. 2. **find_keys:** the rule for identifying the keys relative to the BS. diff --git a/docs/reference/modules/_index.md b/docs/reference/modules/_index.md index 64e3b1d9aa..1823e4b485 100644 --- a/docs/reference/modules/_index.md +++ b/docs/reference/modules/_index.md @@ -1,7 +1,7 @@ --- title: "Redis modules API" linkTitle: "Modules API" -weight: 1 +weight: 2 description: > Introduction to writing Redis modules aliases: diff --git a/docs/reference/modules/modules-api-ref.md b/docs/reference/modules/modules-api-ref.md index 91cf5167af..0471a18b8e 100644 --- a/docs/reference/modules/modules-api-ref.md +++ b/docs/reference/modules/modules-api-ref.md @@ -50,6 +50,7 @@ aliases: * [Module fork API](#section-module-fork-api) * [Server hooks implementation](#section-server-hooks-implementation) * [Module Configurations API](#section-module-configurations-api) +* [RDB load/save API](#section-rdb-load-save-api) * [Key eviction API](#section-key-eviction-api) * [Miscellaneous APIs](#section-miscellaneous-apis) * [Defrag API](#section-defrag-api) @@ -277,9 +278,15 @@ it allows the 
ACLs to be checked before the command is executed. Register a new command in the Redis server, that will be handled by calling the function pointer 'cmdfunc' using the RedisModule calling -convention. The function returns `REDISMODULE_ERR` if the specified command -name is already busy or a set of invalid flags were passed, otherwise -`REDISMODULE_OK` is returned and the new command is registered. +convention. + +The function returns `REDISMODULE_ERR` in these cases: +- If creation of module command is called outside the `RedisModule_OnLoad`. +- The specified command is already busy. +- The command name contains some chars that are not allowed. +- A set of invalid flags were passed. + +Otherwise `REDISMODULE_OK` is returned and the new command is registered. This function must be called during the initialization of the module inside the `RedisModule_OnLoad()` function. Calling this function outside @@ -363,7 +370,8 @@ This information is used by ACL, Cluster and the `COMMAND` command. NOTE: The scheme described above serves a limited purpose and can only be used to find keys that exist at constant indices. For non-trivial key arguments, you may pass 0,0,0 and use -[`RedisModule_SetCommandInfo`](#RedisModule_SetCommandInfo) to set key specs using a more advanced scheme. +[`RedisModule_SetCommandInfo`](#RedisModule_SetCommandInfo) to set key specs using a more advanced scheme and use +[`RedisModule_SetCommandACLCategories`](#RedisModule_SetCommandACLCategories) to set Redis ACL categories of the commands. @@ -422,6 +430,28 @@ Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` in case of the followi * `parent` is already a subcommand (we do not allow more than one level of command nesting) * `parent` is a command with an implementation (`RedisModuleCmdFunc`) (A parent command should be a pure container of subcommands) * `parent` already has a subcommand called `name` +* Creating a subcommand is called outside of `RedisModule_OnLoad`. 
+ + +### `RedisModule_SetCommandACLCategories` + + int RedisModule_SetCommandACLCategories(RedisModuleCommand *command, + const char *aclflags); + +**Available since:** 7.2.0 + +[`RedisModule_SetCommandACLCategories`](#RedisModule_SetCommandACLCategories) can be used to set ACL categories to module +commands and subcommands. The set of ACL categories should be passed as +a space separated C string 'aclflags'. + +For example, the acl flags 'write slow' marks the command as part of the write and +slow ACL categories. + +On success `REDISMODULE_OK` is returned. On error `REDISMODULE_ERR` is returned. + +This function can only be called during the `RedisModule_OnLoad` function. If called +outside of this function, an error is returned. @@ -753,7 +783,7 @@ Otherwise zero is returned. ### `RedisModule_Milliseconds` - long long RedisModule_Milliseconds(void); + mstime_t RedisModule_Milliseconds(void); **Available since:** 4.0.0 @@ -769,6 +799,31 @@ Return the current UNIX time in milliseconds. Return counter of micro-seconds relative to an arbitrary point in time. + + +### `RedisModule_Microseconds` + + ustime_t RedisModule_Microseconds(void); + +**Available since:** 7.2.0 + +Return the current UNIX time in microseconds. + + + +### `RedisModule_CachedMicroseconds` + + ustime_t RedisModule_CachedMicroseconds(void); + +**Available since:** 7.2.0 + +Return the cached UNIX time in microseconds. +It is updated in the server cron job and before executing a command. +It is useful for complex call stacks, such as a command causing a +key space notification, causing a module to execute a [`RedisModule_Call`](#RedisModule_Call), +causing another notification, etc. +It makes sense that all these callbacks would use the same clock. + ### `RedisModule_BlockedClientMeasureTimeStart` @@ -852,6 +907,13 @@ See [`RedisModule_SignalModifiedKey()`](#RedisModule_SignalModifiedKey). 
Setting this flag indicates module awareness of diskless async replication (repl-diskless-load=swapdb) and that redis could be serving reads during replication instead of blocking with LOADING status. +`REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS`: +Declare that the module wants to get nested key-space notifications. +By default, Redis will not fire key-space notifications that happened inside +a key-space notification callback. This flag allows changing this behavior +and fire nested key-space notifications. Notice: if enabled, the module +should protect itself from infinite recursion. + ### `RedisModule_SignalModifiedKey` @@ -1225,7 +1287,8 @@ is not a valid string representation of a stream ID. The special IDs "+" and ### `RedisModule_StringCompare` - int RedisModule_StringCompare(RedisModuleString *a, RedisModuleString *b); + int RedisModule_StringCompare(const RedisModuleString *a, + const RedisModuleString *b); **Available since:** 4.0.0 @@ -1351,6 +1414,30 @@ and not just: The function always returns `REDISMODULE_OK`. + + +### `RedisModule_ReplyWithErrorFormat` + + int RedisModule_ReplyWithErrorFormat(RedisModuleCtx *ctx, + const char *fmt, + ...); + +**Available since:** 7.2.0 + +Reply with the error created from a printf format and arguments. + +Note that 'fmt' must contain all the error, including +the initial error code. The function only provides the initial "-", so +the usage is, for example: + + RedisModule_ReplyWithErrorFormat(ctx,"ERR Wrong Type: %s",type); + +and not just: + + RedisModule_ReplyWithErrorFormat(ctx,"Wrong Type: %s",type); + +The function always returns `REDISMODULE_OK`. + ### `RedisModule_ReplyWithSimpleString` @@ -2045,11 +2132,13 @@ Available flags and their meaning: * `REDISMODULE_CTX_FLAGS_RESP3`: Indicate the that client attached to this context is using RESP3. 
+ * `REDISMODULE_CTX_FLAGS_SERVER_STARTUP`: The Redis instance is starting + ### `RedisModule_AvoidReplicaTraffic` - int RedisModule_AvoidReplicaTraffic(); + int RedisModule_AvoidReplicaTraffic(void); **Available since:** 6.0.0 @@ -2123,14 +2212,42 @@ operations on the key. The return value is the handle representing the key, that must be closed with [`RedisModule_CloseKey()`](#RedisModule_CloseKey). -If the key does not exist and WRITE mode is requested, the handle +If the key does not exist and `REDISMODULE_WRITE` mode is requested, the handle is still returned, since it is possible to perform operations on a yet not existing key (that will be created, for example, after -a list push operation). If the mode is just READ instead, and the +a list push operation). If the mode is just `REDISMODULE_READ` instead, and the key does not exist, NULL is returned. However it is still safe to call [`RedisModule_CloseKey()`](#RedisModule_CloseKey) and [`RedisModule_KeyType()`](#RedisModule_KeyType) on a NULL value. +Extra flags that can be pass to the API under the mode argument: +* `REDISMODULE_OPEN_KEY_NOTOUCH` - Avoid touching the LRU/LFU of the key when opened. +* `REDISMODULE_OPEN_KEY_NONOTIFY` - Don't trigger keyspace event on key misses. +* `REDISMODULE_OPEN_KEY_NOSTATS` - Don't update keyspace hits/misses counters. +* `REDISMODULE_OPEN_KEY_NOEXPIRE` - Avoid deleting lazy expired keys. +* `REDISMODULE_OPEN_KEY_NOEFFECTS` - Avoid any effects from fetching the key. + + + +### `RedisModule_GetOpenKeyModesAll` + + int RedisModule_GetOpenKeyModesAll(void); + +**Available since:** 7.2.0 + + +Returns the full OpenKey modes mask, using the return value +the module can check if a certain set of OpenKey modes are supported +by the redis server version in use. 
+Example: + + int supportedMode = RedisModule_GetOpenKeyModesAll(); + if (supportedMode & REDISMODULE_OPEN_KEY_NOTOUCH) { + // REDISMODULE_OPEN_KEY_NOTOUCH is supported + } else{ + // REDISMODULE_OPEN_KEY_NOTOUCH is not supported + } + ### `RedisModule_CloseKey` @@ -3300,6 +3417,7 @@ Return the reply type as one of the following: - `REDISMODULE_REPLY_BIG_NUMBER` - `REDISMODULE_REPLY_VERBATIM_STRING` - `REDISMODULE_REPLY_ATTRIBUTE` +- `REDISMODULE_REPLY_PROMISE` @@ -3439,6 +3557,40 @@ Returns: The `key` and `value` arguments are used to return by reference, and may be NULL if not required. + + +### `RedisModule_CallReplyPromiseSetUnblockHandler` + + void RedisModule_CallReplyPromiseSetUnblockHandler(RedisModuleCallReply *reply, + RedisModuleOnUnblocked on_unblock, + void *private_data); + +**Available since:** 7.2.0 + +Set unblock handler (callback and private data) on the given promise `RedisModuleCallReply`. +The given reply must be of promise type (`REDISMODULE_REPLY_PROMISE`). + + + +### `RedisModule_CallReplyPromiseAbort` + + int RedisModule_CallReplyPromiseAbort(RedisModuleCallReply *reply, + void **private_data); + +**Available since:** 7.2.0 + +Abort the execution of a given promise `RedisModuleCallReply`. +Return `REDISMODULE_OK` in case the abort was done successfully and `REDISMODULE_ERR` +if it's not possible to abort the execution (execution already finished). +In case the execution was aborted (`REDISMODULE_OK` was returned), the `private_data` out parameter +will be set with the value of the private data that was given on '[`RedisModule_CallReplyPromiseSetUnblockHandler`](#RedisModule_CallReplyPromiseSetUnblockHandler)' +so the caller will be able to release the private data. + +If the execution was aborted successfully, it is promised that the unblock handler will not be called. +That said, it is possible that the abort operation will succeed but the operation will still continue. 
+This can happen if, for example, a module implements some blocking command and does not respect the +disconnect callback. For pure Redis commands this cannot happen. + ### `RedisModule_CallReplyStringPtr` @@ -3461,6 +3613,17 @@ Return the pointer and length of a string or error reply. Return a new string object from a call reply of type string, error or integer. Otherwise (wrong reply type) return NULL. + + +### `RedisModule_SetContextUser` + + void RedisModule_SetContextUser(RedisModuleCtx *ctx, + const RedisModuleUser *user); + +**Available since:** 7.0.6 + +Modifies the user that [`RedisModule_Call`](#RedisModule_Call) will use (e.g. for ACL checks) + ### `RedisModule_Call` @@ -3494,7 +3657,17 @@ Exported API to call any Redis command from modules. * `0` -- Return the reply in auto mode, i.e. the reply format will be the same as the client attached to the given RedisModuleCtx. This will probably used when you want to pass the reply directly to the client. - * `C` -- Check if command can be executed according to ACL rules. + * `C` -- Run a command as the user attached to the context. + User is either attached automatically via the client that directly + issued the command and created the context or via RedisModule_SetContextUser. + If the context is not directly created by an issued command (such as a + background context and no user was set on it via RedisModule_SetContextUser, + RedisModule_Call will fail. + Checks if the command can be executed according to ACL rules and causes + the command to run as the determined user, so that any future user + dependent activity, such as ACL checks within scripts will proceed as + expected. + Otherwise, the command will run as the Redis unrestricted user. * `S` -- Run the command in a script mode, this means that it will raise an error if a command which are not allowed inside a script (flagged with the `deny-script` flag) is invoked (like SHUTDOWN). 
@@ -3507,6 +3680,38 @@ Exported API to call any Redis command from modules. invoking the command, the error is returned using errno mechanism. This flag allows to get the error also as an error CallReply with relevant error message. + * 'D' -- A "Dry Run" mode. Return before executing the underlying call(). + If everything succeeded, it will return with a NULL, otherwise it will + return with a CallReply object denoting the error, as if it was called with + the 'E' code. + * 'K' -- Allow running blocking commands. If enabled and the command gets blocked, a + special REDISMODULE_REPLY_PROMISE will be returned. This reply type + indicates that the command was blocked and the reply will be given asynchronously. + The module can use this reply object to set a handler which will be called when + the command gets unblocked using RedisModule_CallReplyPromiseSetUnblockHandler. + The handler must be set immediately after the command invocation (without releasing + the Redis lock in between). If the handler is not set, the blocking command will + still continue its execution but the reply will be ignored (fire and forget), + notice that this is dangerous in case of role change, as explained below. + The module can use RedisModule_CallReplyPromiseAbort to abort the command invocation + if it was not yet finished (see RedisModule_CallReplyPromiseAbort documentation for more + details). It is also the module's responsibility to abort the execution on role change, either by using + server event (to get notified when the instance becomes a replica) or relying on the disconnect + callback of the original client. Failing to do so can result in a write operation on a replica. + Unlike other call replies, promise call reply **must** be freed while the Redis GIL is locked. 
+ Notice that on unblocking, the only promise is that the unblock handler will be called, + If the blocking RedisModule_Call caused the module to also block some real client (using RedisModule_BlockClient), + it is the module responsibility to unblock this client on the unblock handler. + On the unblock handler it is only allowed to perform the following: + * Calling additional Redis commands using RedisModule_Call + * Open keys using RedisModule_OpenKey + * Replicate data to the replica or AOF + + Specifically, it is not allowed to call any Redis module API which are client related such as: + * RedisModule_Reply* API's + * RedisModule_BlockClient + * RedisModule_GetCurrentUserName + * **...**: The actual arguments to the Redis command. On success a `RedisModuleCallReply` object is returned, otherwise @@ -3665,12 +3870,16 @@ documentation, especially [https://redis.io/topics/modules-native-types](https:/ so that meta information such as key name and db id can be obtained. * **copy2**: Similar to `copy`, but provides the `RedisModuleKeyOptCtx` parameter so that meta information such as key names and db ids can be obtained. +* **aux_save2**: Similar to `aux_save`, but with small semantic change, if the module + saves nothing on this callback then no data about this aux field will be written to the + RDB and it will be possible to load the RDB even if the module is not loaded. Note: the module name "AAAAAAAAA" is reserved and produces an error, it happens to be pretty lame as well. -If there is already a module registering a type with the same name, -and if the module name or encver is invalid, NULL is returned. +If [`RedisModule_CreateDataType()`](#RedisModule_CreateDataType) is called outside of `RedisModule_OnLoad()` function, +there is already a module registering a type with the same name, +or if the module name or encver is invalid, NULL is returned. 
Otherwise the new type is registered into Redis, and a reference of type `RedisModuleType` is returned: the caller of the function should store this reference into a global variable to make future use of it in the @@ -4216,15 +4425,75 @@ latency-monitor-threshold. For a guide about blocking commands in modules, see [https://redis.io/topics/modules-blocking-ops](https://redis.io/topics/modules-blocking-ops). + + +### `RedisModule_RegisterAuthCallback` + + void RedisModule_RegisterAuthCallback(RedisModuleCtx *ctx, + RedisModuleAuthCallback cb); + +**Available since:** 7.2.0 + +This API registers a callback to execute in addition to normal password based authentication. +Multiple callbacks can be registered across different modules. When a Module is unloaded, all the +auth callbacks registered by it are unregistered. +The callbacks are attempted (in the order of most recently registered first) when the AUTH/HELLO +(with AUTH field provided) commands are called. +The callbacks will be called with a module context along with a username and a password, and are +expected to take one of the following actions: +(1) Authenticate - Use the `RedisModule_AuthenticateClient`* API and return `REDISMODULE_AUTH_HANDLED`. +This will immediately end the auth chain as successful and add the OK reply. +(2) Deny Authentication - Return `REDISMODULE_AUTH_HANDLED` without authenticating or blocking the +client. Optionally, `err` can be set to a custom error message and `err` will be automatically +freed by the server. +This will immediately end the auth chain as unsuccessful and add the ERR reply. +(3) Block a client on authentication - Use the [`RedisModule_BlockClientOnAuth`](#RedisModule_BlockClientOnAuth) API and return +`REDISMODULE_AUTH_HANDLED`. 
Here, the client will be blocked until the [`RedisModule_UnblockClient`](#RedisModule_UnblockClient) API is used +which will trigger the auth reply callback (provided through the [`RedisModule_BlockClientOnAuth`](#RedisModule_BlockClientOnAuth)). +In this reply callback, the Module should authenticate, deny or skip handling authentication. +(4) Skip handling Authentication - Return `REDISMODULE_AUTH_NOT_HANDLED` without blocking the +client. This will allow the engine to attempt the next module auth callback. +If none of the callbacks authenticate or deny auth, then password based auth is attempted and +will authenticate or add failure logs and reply to the clients accordingly. + +Note: If a client is disconnected while it was in the middle of blocking module auth, that +occurrence of the AUTH or HELLO command will not be tracked in the INFO command stats. + +The following is an example of how non-blocking module based authentication can be used: + + int auth_cb(RedisModuleCtx *ctx, RedisModuleString *username, RedisModuleString *password, RedisModuleString **err) { + const char *user = RedisModule_StringPtrLen(username, NULL); + const char *pwd = RedisModule_StringPtrLen(password, NULL); + if (!strcmp(user,"foo") && !strcmp(pwd,"valid_password")) { + RedisModule_AuthenticateClientWithACLUser(ctx, "foo", 3, NULL, NULL, NULL); + return REDISMODULE_AUTH_HANDLED; + } + + else if (!strcmp(user,"foo") && !strcmp(pwd,"wrong_password")) { + RedisModuleString *log = RedisModule_CreateString(ctx, "Module Auth", 11); + RedisModule_ACLAddLogEntryByUserName(ctx, username, log, REDISMODULE_ACL_LOG_AUTH); + RedisModule_FreeString(ctx, log); + const char *err_msg = "Auth denied by Misc Module."; + *err = RedisModule_CreateString(ctx, err_msg, strlen(err_msg)); + return REDISMODULE_AUTH_HANDLED; + } + return REDISMODULE_AUTH_NOT_HANDLED; + } + + int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + if 
(RedisModule_Init(ctx,"authmodule",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR) + return REDISMODULE_ERR; + RedisModule_RegisterAuthCallback(ctx, auth_cb); + return REDISMODULE_OK; + } + ### `RedisModule_BlockClient` RedisModuleBlockedClient *RedisModule_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, - RedisModuleCmdFunc timeout_callback, - void (*free_privdata)(RedisModuleCtx*, void*), - long long timeout_ms); + ; **Available since:** 4.0.0 @@ -4267,18 +4536,49 @@ is not account for the total command duration. To include such time you should use [`RedisModule_BlockedClientMeasureTimeStart()`](#RedisModule_BlockedClientMeasureTimeStart) and [`RedisModule_BlockedClientMeasureTimeEnd()`](#RedisModule_BlockedClientMeasureTimeEnd) one, or multiple times within the blocking command background work. + + +### `RedisModule_BlockClientOnAuth` + + RedisModuleBlockedClient *RedisModule_BlockClientOnAuth(RedisModuleCtx *ctx, + RedisModuleAuthCallback reply_callback, + ; + +**Available since:** 7.2.0 + +Block the current client for module authentication in the background. If module auth is not in +progress on the client, the API returns NULL. Otherwise, the client is blocked and the `RedisModule_BlockedClient` +is returned similar to the [`RedisModule_BlockClient`](#RedisModule_BlockClient) API. +Note: Only use this API from the context of a module auth callback. 
+ + + +### `RedisModule_BlockClientGetPrivateData` + + void *RedisModule_BlockClientGetPrivateData(RedisModuleBlockedClient *blocked_client); + +**Available since:** 7.2.0 + +Get the private data that was previously set on a blocked client + + + +### `RedisModule_BlockClientSetPrivateData` + + void RedisModule_BlockClientSetPrivateData(RedisModuleBlockedClient *blocked_client, + void *private_data); + +**Available since:** 7.2.0 + +Set private data on a blocked client + ### `RedisModule_BlockClientOnKeys` RedisModuleBlockedClient *RedisModule_BlockClientOnKeys(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, - RedisModuleCmdFunc timeout_callback, - void (*free_privdata)(RedisModuleCtx*, void*), - long long timeout_ms, - RedisModuleString **keys, - int numkeys, - void *privdata); + ; **Available since:** 6.0.0 @@ -4340,6 +4640,25 @@ Note: Under normal circumstances [`RedisModule_UnblockClient`](#RedisModule_Unbl handled as if it were timed-out (You must implement the timeout callback in that case). + + + +### `RedisModule_BlockClientOnKeysWithFlags` + + RedisModuleBlockedClient *RedisModule_BlockClientOnKeysWithFlags(RedisModuleCtx *ctx, + RedisModuleCmdFunc reply_callback, + ; + +**Available since:** 7.2.0 + +Same as [`RedisModule_BlockClientOnKeys`](#RedisModule_BlockClientOnKeys), but can take `REDISMODULE_BLOCK_`* flags +Can be either `REDISMODULE_BLOCK_UNBLOCK_DEFAULT`, which means default behavior (same +as calling [`RedisModule_BlockClientOnKeys`](#RedisModule_BlockClientOnKeys)) + +The flags is a bit mask of these: + +- `REDISMODULE_BLOCK_UNBLOCK_DELETED`: The clients should be awakened in case any of `keys` are deleted. + Mostly useful for commands that require the key to exist (like XREADGROUP) + ### `RedisModule_SignalKeyAsReady` @@ -4352,8 +4671,6 @@ This function is used in order to potentially unblock a client blocked on keys with [`RedisModule_BlockClientOnKeys()`](#RedisModule_BlockClientOnKeys).
When this function is called, all the clients blocked for this key will get their `reply_callback` called. -Note: The function has no effect if the signaled key doesn't exist. - ### `RedisModule_UnblockClient` @@ -4617,6 +4934,12 @@ is interested in. This can be an ORed mask of any of the following flags: - `REDISMODULE_NOTIFY_STREAM`: Stream events - `REDISMODULE_NOTIFY_MODULE`: Module types events - `REDISMODULE_NOTIFY_KEYMISS`: Key-miss events + Notice, key-miss event is the only type + of event that is fired from within a read command. + Performing RedisModule_Call with a write command from within + this notification is wrong and discouraged. It will + cause the read command that triggered the event to be + replicated to the AOF/Replica. - `REDISMODULE_NOTIFY_ALL`: All events (Excluding `REDISMODULE_NOTIFY_KEYMISS`) - `REDISMODULE_NOTIFY_LOADED`: A special notification available only for modules, indicates that the key was loaded from persistence. @@ -4648,13 +4971,48 @@ Warning: the notification callbacks are performed in a synchronous manner, so notification callbacks must to be fast, or they would slow Redis down. If you need to take long actions, use threads to offload them. +Moreover, the fact that the notification is executed synchronously means +that the notification code will be executed in the middle of Redis logic +(commands logic, eviction, expire). Changing the key space while the logic +runs is dangerous and discouraged. In order to react to key space events with +write actions, please refer to [`RedisModule_AddPostNotificationJob`](#RedisModule_AddPostNotificationJob). + See [https://redis.io/topics/notifications](https://redis.io/topics/notifications) for more information.
+ + +### `RedisModule_AddPostNotificationJob` + + int RedisModule_AddPostNotificationJob(RedisModuleCtx *ctx, + RedisModulePostNotificationJobFunc callback, + void *privdata, + void (*free_privdata)(void*)); + +**Available since:** 7.2.0 + +When running inside a key space notification callback, it is dangerous and highly discouraged to perform any write +operation (See [`RedisModule_SubscribeToKeyspaceEvents`](#RedisModule_SubscribeToKeyspaceEvents)). In order to still perform write actions in this scenario, +Redis provides [`RedisModule_AddPostNotificationJob`](#RedisModule_AddPostNotificationJob) API. The API allows registering a job callback which Redis will call +when the following conditions are promised to be fulfilled: +1. It is safe to perform any write operation. +2. The job will be called atomically alongside the key space notification. + +Notice, one job might trigger key space notifications that will trigger more jobs. +This raises a concern of entering infinite loops; we consider infinite loops +as a logical bug that needs to be fixed in the module, an attempt to protect against +infinite loops by halting the execution could result in violation of the feature correctness +and so Redis will make no attempt to protect the module from infinite loops. + +'`free_privdata`' can be NULL and in such case will not be used. + +Return `REDISMODULE_OK` on success and `REDISMODULE_ERR` if it was called while loading data from disk (AOF or RDB) or +if the instance is a readonly replica. + ### `RedisModule_GetNotifyKeyspaceEvents` - int RedisModule_GetNotifyKeyspaceEvents(); + int RedisModule_GetNotifyKeyspaceEvents(void); **Available since:** 6.0.0 @@ -5051,6 +5409,36 @@ for detailed usage. Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on failure and will set an errno describing why the operation failed.
+ + +### `RedisModule_SetModuleUserACLString` + + int RedisModule_SetModuleUserACLString(RedisModuleCtx *ctx, + RedisModuleUser *user, + const char *acl, + RedisModuleString **error); + +**Available since:** 7.0.6 + +Sets the permission of a user with a complete ACL string, such as one +would use on the redis ACL SETUSER command line API. This differs from +[`RedisModule_SetModuleUserACL`](#RedisModule_SetModuleUserACL), which only takes single ACL operations at a time. + +Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on failure +if a `RedisModuleString` is provided in error, a string describing the error +will be returned + + + +### `RedisModule_GetModuleUserACLString` + + RedisModuleString *RedisModule_GetModuleUserACLString(RedisModuleUser *user); + +**Available since:** 7.0.6 + +Get the ACL string for a given user +Returns a `RedisModuleString` + ### `RedisModule_GetCurrentUserName` @@ -5165,6 +5553,22 @@ Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on error. For more information about ACL log, please refer to [https://redis.io/commands/acl-log](https://redis.io/commands/acl-log) + + +### `RedisModule_ACLAddLogEntryByUserName` + + int RedisModule_ACLAddLogEntryByUserName(RedisModuleCtx *ctx, + RedisModuleString *username, + RedisModuleString *object, + RedisModuleACLLogEntryReason reason); + +**Available since:** 7.2.0 + +Adds a new entry in the ACL log with the `username` `RedisModuleString` provided. +Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on error. + +For more information about ACL log, please refer to [https://redis.io/commands/acl-log](https://redis.io/commands/acl-log) + ### `RedisModule_AuthenticateClientWithUser` @@ -5919,7 +6323,7 @@ command should return an error. Here is an example: - int ... myCommandImplementation() { + int ... 
myCommandImplementation(void) { if (getExternalAPIs() == 0) { reply with an error here if we cannot have the APIs } @@ -5933,7 +6337,7 @@ And the function registerAPI() is: static int api_loaded = 0; if (api_loaded != 0) return 1; // APIs already resolved. - myFunctionPointer = RedisModule_GetOtherModuleAPI("..."); + myFunctionPointer = RedisModule_GetSharedAPI("..."); if (myFunctionPointer == NULL) return 0; return 1; @@ -6080,6 +6484,16 @@ or used elsewhere. Modify the filtered command by deleting an argument at the specified position. + + +### `RedisModule_CommandFilterGetClientId` + + unsigned long long RedisModule_CommandFilterGetClientId(RedisModuleCommandFilterCtx *fctx); + +**Available since:** 7.2.0 + +Get Client ID for client that issued the command we are filtering + ### `RedisModule_MallocSize` @@ -6131,7 +6545,7 @@ it does not include the allocation size of the keys and values. ### `RedisModule_GetUsedMemoryRatio` - float RedisModule_GetUsedMemoryRatio(); + float RedisModule_GetUsedMemoryRatio(void); **Available since:** 6.0.0 @@ -6151,7 +6565,7 @@ currently used, relative to the Redis "maxmemory" configuration. ### `RedisModule_ScanCursorCreate` - RedisModuleScanCursor *RedisModule_ScanCursorCreate(); + RedisModuleScanCursor *RedisModule_ScanCursorCreate(void); **Available since:** 6.0.0 @@ -6649,6 +7063,22 @@ Here is a list of events you can use as 'eid' and related sub events: // name of each modified configuration item uint32_t num_changes; // The number of elements in the config_names array +* `RedisModule_Event_Key` + + Called when a key is removed from the keyspace. We can't modify any key in + the event. 
+ The following sub events are available: + + * `REDISMODULE_SUBEVENT_KEY_DELETED` + * `REDISMODULE_SUBEVENT_KEY_EXPIRED` + * `REDISMODULE_SUBEVENT_KEY_EVICTED` + * `REDISMODULE_SUBEVENT_KEY_OVERWRITTEN` + + The data pointer can be casted to a RedisModuleKeyInfo + structure with the following fields: + + RedisModuleKey *key; // Key name + The function returns `REDISMODULE_OK` if the module was successfully subscribed for the specified event. If the API is called from a wrong context or unsupported event is given then `REDISMODULE_ERR` is returned. @@ -6756,6 +7186,7 @@ Example implementation: If the registration fails, `REDISMODULE_ERR` is returned and one of the following errno is set: +* EBUSY: Registering the Config outside of `RedisModule_OnLoad`. * EINVAL: The provided flags are invalid for the registration or the name of the config contains invalid characters. * EALREADY: The provided configuration name is already used. @@ -6858,9 +7289,85 @@ Create an integer config that server clients can interact with via the Applies all pending configurations on the module load. This should be called after all of the configurations have been registered for the module inside of `RedisModule_OnLoad`. +This will return `REDISMODULE_ERR` if it is called outside `RedisModule_OnLoad`. This API needs to be called when configurations are provided in either `MODULE LOADEX` or provided as startup arguments. + + +## RDB load/save API + + + +### `RedisModule_RdbStreamCreateFromFile` + + RedisModuleRdbStream *RedisModule_RdbStreamCreateFromFile(const char *filename); + +**Available since:** 7.2.0 + +Create a stream object to save/load RDB to/from a file. + +This function returns a pointer to `RedisModuleRdbStream` which is owned +by the caller. It requires a call to [`RedisModule_RdbStreamFree()`](#RedisModule_RdbStreamFree) to free +the object. 
+ + + +### `RedisModule_RdbStreamFree` + + void RedisModule_RdbStreamFree(RedisModuleRdbStream *stream); + +**Available since:** 7.2.0 + +Release an RDB stream object. + + + +### `RedisModule_RdbLoad` + + int RedisModule_RdbLoad(RedisModuleCtx *ctx, + RedisModuleRdbStream *stream, + int flags); + +**Available since:** 7.2.0 + +Load RDB file from the `stream`. Dataset will be cleared first and then RDB +file will be loaded. + +`flags` must be zero. This parameter is for future use. + +On success `REDISMODULE_OK` is returned, otherwise `REDISMODULE_ERR` is returned +and errno is set accordingly. + +Example: + + RedisModuleRdbStream *s = RedisModule_RdbStreamCreateFromFile("exp.rdb"); + RedisModule_RdbLoad(ctx, s, 0); + RedisModule_RdbStreamFree(s); + + + +### `RedisModule_RdbSave` + + int RedisModule_RdbSave(RedisModuleCtx *ctx, + RedisModuleRdbStream *stream, + int flags); + +**Available since:** 7.2.0 + +Save dataset to the RDB stream. + +`flags` must be zero. This parameter is for future use. + +On success `REDISMODULE_OK` is returned, otherwise `REDISMODULE_ERR` is returned +and errno is set accordingly. + +Example: + + RedisModuleRdbStream *s = RedisModule_RdbStreamCreateFromFile("exp.rdb"); + RedisModule_RdbSave(ctx, s, 0); + RedisModule_RdbStreamFree(s); + ## Key eviction API @@ -6920,11 +7427,32 @@ returns `REDISMODULE_OK` if when key is valid. ## Miscellaneous APIs + + +### `RedisModule_GetModuleOptionsAll` + + int RedisModule_GetModuleOptionsAll(void); + +**Available since:** 7.2.0 + + +Returns the full module options flags mask, using the return value +the module can check if a certain set of module options are supported +by the redis server version in use. 
+Example: + + int supportedFlags = RedisModule_GetModuleOptionsAll(); + if (supportedFlags & REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS) { + // REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS is supported + } else{ + // REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS is not supported + } + ### `RedisModule_GetContextFlagsAll` - int RedisModule_GetContextFlagsAll(); + int RedisModule_GetContextFlagsAll(void); **Available since:** 6.0.9 @@ -6945,7 +7473,7 @@ Example: ### `RedisModule_GetKeyspaceNotificationFlagsAll` - int RedisModule_GetKeyspaceNotificationFlagsAll(); + int RedisModule_GetKeyspaceNotificationFlagsAll(void); **Available since:** 6.0.9 @@ -6966,7 +7494,7 @@ Example: ### `RedisModule_GetServerVersion` - int RedisModule_GetServerVersion(); + int RedisModule_GetServerVersion(void); **Available since:** 6.0.9 @@ -6978,7 +7506,7 @@ Example for 6.0.7 the return value will be 0x00060007. ### `RedisModule_GetTypeMethodVersion` - int RedisModule_GetTypeMethodVersion(); + int RedisModule_GetTypeMethodVersion(void); **Available since:** 6.2.0 @@ -7219,20 +7747,27 @@ There is no guarantee that this info is always available, so this may return -1. 
## Function index * [`RedisModule_ACLAddLogEntry`](#RedisModule_ACLAddLogEntry) +* [`RedisModule_ACLAddLogEntryByUserName`](#RedisModule_ACLAddLogEntryByUserName) * [`RedisModule_ACLCheckChannelPermissions`](#RedisModule_ACLCheckChannelPermissions) * [`RedisModule_ACLCheckCommandPermissions`](#RedisModule_ACLCheckCommandPermissions) * [`RedisModule_ACLCheckKeyPermissions`](#RedisModule_ACLCheckKeyPermissions) * [`RedisModule_AbortBlock`](#RedisModule_AbortBlock) +* [`RedisModule_AddPostNotificationJob`](#RedisModule_AddPostNotificationJob) * [`RedisModule_Alloc`](#RedisModule_Alloc) * [`RedisModule_AuthenticateClientWithACLUser`](#RedisModule_AuthenticateClientWithACLUser) * [`RedisModule_AuthenticateClientWithUser`](#RedisModule_AuthenticateClientWithUser) * [`RedisModule_AutoMemory`](#RedisModule_AutoMemory) * [`RedisModule_AvoidReplicaTraffic`](#RedisModule_AvoidReplicaTraffic) * [`RedisModule_BlockClient`](#RedisModule_BlockClient) +* [`RedisModule_BlockClientGetPrivateData`](#RedisModule_BlockClientGetPrivateData) +* [`RedisModule_BlockClientOnAuth`](#RedisModule_BlockClientOnAuth) * [`RedisModule_BlockClientOnKeys`](#RedisModule_BlockClientOnKeys) +* [`RedisModule_BlockClientOnKeysWithFlags`](#RedisModule_BlockClientOnKeysWithFlags) +* [`RedisModule_BlockClientSetPrivateData`](#RedisModule_BlockClientSetPrivateData) * [`RedisModule_BlockedClientDisconnected`](#RedisModule_BlockedClientDisconnected) * [`RedisModule_BlockedClientMeasureTimeEnd`](#RedisModule_BlockedClientMeasureTimeEnd) * [`RedisModule_BlockedClientMeasureTimeStart`](#RedisModule_BlockedClientMeasureTimeStart) +* [`RedisModule_CachedMicroseconds`](#RedisModule_CachedMicroseconds) * [`RedisModule_Call`](#RedisModule_Call) * [`RedisModule_CallReplyArrayElement`](#RedisModule_CallReplyArrayElement) * [`RedisModule_CallReplyAttribute`](#RedisModule_CallReplyAttribute) @@ -7243,6 +7778,8 @@ There is no guarantee that this info is always available, so this may return -1. 
* [`RedisModule_CallReplyInteger`](#RedisModule_CallReplyInteger) * [`RedisModule_CallReplyLength`](#RedisModule_CallReplyLength) * [`RedisModule_CallReplyMapElement`](#RedisModule_CallReplyMapElement) +* [`RedisModule_CallReplyPromiseAbort`](#RedisModule_CallReplyPromiseAbort) +* [`RedisModule_CallReplyPromiseSetUnblockHandler`](#RedisModule_CallReplyPromiseSetUnblockHandler) * [`RedisModule_CallReplyProto`](#RedisModule_CallReplyProto) * [`RedisModule_CallReplySetElement`](#RedisModule_CallReplySetElement) * [`RedisModule_CallReplyStringPtr`](#RedisModule_CallReplyStringPtr) @@ -7256,6 +7793,7 @@ There is no guarantee that this info is always available, so this may return -1. * [`RedisModule_CommandFilterArgInsert`](#RedisModule_CommandFilterArgInsert) * [`RedisModule_CommandFilterArgReplace`](#RedisModule_CommandFilterArgReplace) * [`RedisModule_CommandFilterArgsCount`](#RedisModule_CommandFilterArgsCount) +* [`RedisModule_CommandFilterGetClientId`](#RedisModule_CommandFilterGetClientId) * [`RedisModule_CreateCommand`](#RedisModule_CreateCommand) * [`RedisModule_CreateDataType`](#RedisModule_CreateDataType) * [`RedisModule_CreateDict`](#RedisModule_CreateDict) @@ -7351,9 +7889,12 @@ There is no guarantee that this info is always available, so this may return -1. 
* [`RedisModule_GetKeyspaceNotificationFlagsAll`](#RedisModule_GetKeyspaceNotificationFlagsAll) * [`RedisModule_GetLFU`](#RedisModule_GetLFU) * [`RedisModule_GetLRU`](#RedisModule_GetLRU) +* [`RedisModule_GetModuleOptionsAll`](#RedisModule_GetModuleOptionsAll) +* [`RedisModule_GetModuleUserACLString`](#RedisModule_GetModuleUserACLString) * [`RedisModule_GetModuleUserFromUserName`](#RedisModule_GetModuleUserFromUserName) * [`RedisModule_GetMyClusterID`](#RedisModule_GetMyClusterID) * [`RedisModule_GetNotifyKeyspaceEvents`](#RedisModule_GetNotifyKeyspaceEvents) +* [`RedisModule_GetOpenKeyModesAll`](#RedisModule_GetOpenKeyModesAll) * [`RedisModule_GetRandomBytes`](#RedisModule_GetRandomBytes) * [`RedisModule_GetRandomHexChars`](#RedisModule_GetRandomHexChars) * [`RedisModule_GetSelectedDb`](#RedisModule_GetSelectedDb) @@ -7412,6 +7953,7 @@ There is no guarantee that this info is always available, so this may return -1. * [`RedisModule_MallocSizeDict`](#RedisModule_MallocSizeDict) * [`RedisModule_MallocSizeString`](#RedisModule_MallocSizeString) * [`RedisModule_MallocUsableSize`](#RedisModule_MallocUsableSize) +* [`RedisModule_Microseconds`](#RedisModule_Microseconds) * [`RedisModule_Milliseconds`](#RedisModule_Milliseconds) * [`RedisModule_ModuleTypeGetType`](#RedisModule_ModuleTypeGetType) * [`RedisModule_ModuleTypeGetValue`](#RedisModule_ModuleTypeGetValue) @@ -7424,8 +7966,13 @@ There is no guarantee that this info is always available, so this may return -1. 
* [`RedisModule_PublishMessage`](#RedisModule_PublishMessage) * [`RedisModule_PublishMessageShard`](#RedisModule_PublishMessageShard) * [`RedisModule_RandomKey`](#RedisModule_RandomKey) +* [`RedisModule_RdbLoad`](#RedisModule_RdbLoad) +* [`RedisModule_RdbSave`](#RedisModule_RdbSave) +* [`RedisModule_RdbStreamCreateFromFile`](#RedisModule_RdbStreamCreateFromFile) +* [`RedisModule_RdbStreamFree`](#RedisModule_RdbStreamFree) * [`RedisModule_Realloc`](#RedisModule_Realloc) * [`RedisModule_RedactClientCommandArgument`](#RedisModule_RedactClientCommandArgument) +* [`RedisModule_RegisterAuthCallback`](#RedisModule_RegisterAuthCallback) * [`RedisModule_RegisterBoolConfig`](#RedisModule_RegisterBoolConfig) * [`RedisModule_RegisterClusterMessageReceiver`](#RedisModule_RegisterClusterMessageReceiver) * [`RedisModule_RegisterCommandFilter`](#RedisModule_RegisterCommandFilter) @@ -7450,6 +7997,7 @@ There is no guarantee that this info is always available, so this may return -1. * [`RedisModule_ReplyWithEmptyArray`](#RedisModule_ReplyWithEmptyArray) * [`RedisModule_ReplyWithEmptyString`](#RedisModule_ReplyWithEmptyString) * [`RedisModule_ReplyWithError`](#RedisModule_ReplyWithError) +* [`RedisModule_ReplyWithErrorFormat`](#RedisModule_ReplyWithErrorFormat) * [`RedisModule_ReplyWithLongDouble`](#RedisModule_ReplyWithLongDouble) * [`RedisModule_ReplyWithLongLong`](#RedisModule_ReplyWithLongLong) * [`RedisModule_ReplyWithMap`](#RedisModule_ReplyWithMap) @@ -7487,13 +8035,16 @@ There is no guarantee that this info is always available, so this may return -1. 
* [`RedisModule_SetAbsExpire`](#RedisModule_SetAbsExpire) * [`RedisModule_SetClientNameById`](#RedisModule_SetClientNameById) * [`RedisModule_SetClusterFlags`](#RedisModule_SetClusterFlags) +* [`RedisModule_SetCommandACLCategories`](#RedisModule_SetCommandACLCategories) * [`RedisModule_SetCommandInfo`](#RedisModule_SetCommandInfo) +* [`RedisModule_SetContextUser`](#RedisModule_SetContextUser) * [`RedisModule_SetDisconnectCallback`](#RedisModule_SetDisconnectCallback) * [`RedisModule_SetExpire`](#RedisModule_SetExpire) * [`RedisModule_SetLFU`](#RedisModule_SetLFU) * [`RedisModule_SetLRU`](#RedisModule_SetLRU) * [`RedisModule_SetModuleOptions`](#RedisModule_SetModuleOptions) * [`RedisModule_SetModuleUserACL`](#RedisModule_SetModuleUserACL) +* [`RedisModule_SetModuleUserACLString`](#RedisModule_SetModuleUserACLString) * [`RedisModule_SignalKeyAsReady`](#RedisModule_SignalKeyAsReady) * [`RedisModule_SignalModifiedKey`](#RedisModule_SignalModifiedKey) * [`RedisModule_StopTimer`](#RedisModule_StopTimer) diff --git a/docs/reference/modules/modules-blocking-ops.md b/docs/reference/modules/modules-blocking-ops.md index cda8f869ae..80bb4bc51b 100644 --- a/docs/reference/modules/modules-blocking-ops.md +++ b/docs/reference/modules/modules-blocking-ops.md @@ -23,15 +23,6 @@ Redis modules have the ability to implement blocking commands as well, this documentation shows how the API works and describes a few patterns that can be used in order to model blocking commands. -NOTE: This API is currently *experimental*, so it can only be used if -the macro `REDISMODULE_EXPERIMENTAL_API` is defined. This is required because -these calls are still not in their final stage of design, so may change -in the future, certain parts may be deprecated and so forth. - -To use this part of the modules API include the modules header like that: - - #define REDISMODULE_EXPERIMENTAL_API - #include "redismodule.h" How blocking and resuming works. 
--- diff --git a/docs/reference/protocol-spec.md b/docs/reference/protocol-spec.md index 9ae6cc3129..c5ace66fd8 100644 --- a/docs/reference/protocol-spec.md +++ b/docs/reference/protocol-spec.md @@ -1,214 +1,274 @@ --- -title: "RESP protocol spec" +title: "Redis serialization protocol specification" linkTitle: "Protocol spec" -weight: 1 -description: Redis serialization protocol (RESP) specification +weight: 4 +description: Redis serialization protocol (RESP) is the wire protocol that clients implement aliases: - /topics/protocol --- -Redis clients use a protocol called **RESP** (REdis Serialization Protocol) to communicate with the Redis server. While the protocol was designed specifically for Redis, it can be used for other client-server software projects. +To communicate with the Redis server, Redis clients use a protocol called REdis Serialization Protocol (RESP). +While the protocol was designed specifically for Redis, you can use it for other client-server software projects. -RESP is a compromise between the following things: +RESP is a compromise among the following considerations: * Simple to implement. * Fast to parse. * Human readable. -RESP can serialize different data types like integers, strings, and arrays. There is also a specific type for errors. Requests are sent from the client to the Redis server as arrays of strings that represent the arguments of the command to execute. Redis replies with a command-specific data type. +RESP can serialize different data types including integers, strings, and arrays. +It also features an error-specific type. +A client sends a request to the Redis server as an array of strings. +The array's contents are the command and its arguments that the server should execute. +The server's reply type is command-specific. -RESP is binary-safe and does not require processing of bulk data transferred from one process to another because it uses prefixed-length to transfer bulk data. 
+RESP is binary-safe and uses prefixed length to transfer bulk data so it does not require processing bulk data transferred from one process to another. -Note: the protocol outlined here is only used for client-server communication. Redis Cluster uses a different binary protocol in order to exchange messages between nodes. +RESP is the protocol you should implement in your Redis client. -## Network layer +{{% alert title="Note" color="info" %}} +The protocol outlined here is used only for client-server communication. +[Redis Cluster](/docs/reference/cluster-spec) uses a different binary protocol for exchanging messages between nodes. +{{% /alert %}} -A client connects to a Redis server by creating a TCP connection to the port 6379. +## RESP versions +Support for the first version of the RESP protocol was introduced in Redis 1.2. +Using RESP with Redis 1.2 was optional and had mainly served the purpose of working the kinks out of the protocol. -While RESP is technically non-TCP specific, the protocol is only used with TCP connections (or equivalent stream-oriented connections like Unix sockets) in the context of Redis. +In Redis 2.0, the protocol's next version, a.k.a RESP2, became the standard communication method for clients with the Redis server. -## Request-Response model +[RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md) is a superset of RESP2 that mainly aims to make a client author's life a little bit easier. +Redis 6.0 introduced experimental opt-in support of RESP3's features (excluding streaming strings and streaming aggregates). +In addition, the introduction of the `HELLO` command allows clients to handshake and upgrade the connection's protocol version (see [Client handshake](#client-handshake)). -Redis accepts commands composed of different arguments. -Once a command is received, it is processed and a reply is sent back to the client. 
+Up to and including Redis 7, both RESP2 and RESP3 clients can invoke all core commands. +However, commands may return differently typed replies for different protocol versions. -This is the simplest model possible; however, there are two exceptions: +Future versions of Redis may change the default protocol version, but it is unlikely that RESP2 will become entirely deprecated. +It is possible, however, that new features in upcoming versions will require the use of RESP3. + +## Network layer +A client connects to a Redis server by creating a TCP connection to its port (the default is 6379). -* Redis supports pipelining (covered later in this document). So it is possible for clients to send multiple commands at once and wait for replies later. -* When a Redis client subscribes to a Pub/Sub channel, the protocol changes semantics and becomes a *push* protocol. The client no longer requires sending commands because the server will automatically send new messages to the client (for the channels the client is subscribed to) as soon as they are received. +While RESP is technically non-TCP specific, the protocol is used exclusively with TCP connections (or equivalent stream-oriented connections like Unix sockets) in the context of Redis. -Excluding these two exceptions, the Redis protocol is a simple request-response protocol. +## Request-Response model +The Redis server accepts commands composed of different arguments. +Then, the server processes the command and sends the reply back to the client. + +This is the simplest model possible; however, there are some exceptions: + +* Redis requests can be [pipelined](#multiple-commands-and-pipelining). + Pipelining enables clients to send multiple commands at once and wait for replies later. +* When a RESP2 connection subscribes to a [Pub/Sub](/docs/manual/pubsub) channel, the protocol changes semantics and becomes a *push* protocol. 
+ The client no longer requires sending commands because the server will automatically send new messages to the client (for the channels the client is subscribed to) as soon as they are received. +* The `MONITOR` command. + Invoking the `MONITOR` command switches the connection to an ad-hoc push mode. + The protocol of this mode is not specified but is obvious to parse. +* [Protected mode](/docs/management/security/#protected-mode). + Connections opened from a non-loopback address to a Redis while in protected mode are denied and terminated by the server. + Before terminating the connection, Redis unconditionally sends a `-DENIED` reply, regardless of whether the client writes to the socket. +* The [RESP3 Push type](#resp3-pushes). + As the name suggests, a push type allows the server to send out-of-band data to the connection. + The server may push data at any time, and the data isn't necessarily related to specific commands executed by the client. + +Excluding these exceptions, the Redis protocol is a simple request-response protocol. ## RESP protocol description +RESP is essentially a serialization protocol that supports several data types. +In RESP, the first byte of data determines its type. -The RESP protocol was introduced in Redis 1.2, but it became the -standard way for talking with the Redis server in Redis 2.0. -This is the protocol you should implement in your Redis client. +Redis generally uses RESP as a [request-response](#request-response-model) protocol in the following way: -RESP is actually a serialization protocol that supports the following -data types: Simple Strings, Errors, Integers, Bulk Strings, and Arrays. +* Clients send commands to a Redis server as an [array](#arrays) of [bulk strings](#bulk-strings). + The first (and sometimes also the second) bulk string in the array is the command's name. + Subsequent elements of the array are the arguments for the command. +* The server replies with a RESP type. 
+ The reply's type is determined by the command's implementation and possibly by the client's protocol version. -Redis uses RESP as a request-response protocol in the -following way: +RESP is a binary protocol that uses control sequences encoded in standard ASCII. +The `A` character, for example, is encoded with the binary byte of value 65. +Similarly, the characters CR (`\r`), LF (`\n`) and SP (` `) have binary byte values of 13, 10 and 32, respectively. -* Clients send commands to a Redis server as a RESP Array of Bulk Strings. -* The server replies with one of the RESP types according to the command implementation. +The `\r\n` (CRLF) is the protocol's _terminator_, which **always** separates its parts. -In RESP, the first byte determines the data type: +The first byte in an RESP-serialized payload always identifies its type. +Subsequent bytes constitute the type's contents. -* For **Simple Strings**, the first byte of the reply is "+" -* For **Errors**, the first byte of the reply is "-" -* For **Integers**, the first byte of the reply is ":" -* For **Bulk Strings**, the first byte of the reply is "$" -* For **Arrays**, the first byte of the reply is "`*`" +We categorize every RESP data type as either _simple_, _bulk_ or _aggregate_. -RESP can represent a Null value using a special variation of Bulk Strings or Array as specified later. +Simple types are similar to scalars in programming languages that represent plain literal values. Booleans and Integers are such examples. -In RESP, different parts of the protocol are always terminated with "\r\n" (CRLF). +RESP strings are either _simple_ or _bulk_. +Simple strings never contain carriage return (`\r`) or line feed (`\n`) characters. +Bulk strings can contain any binary data and may also be referred to as _binary_ or _blob_. +Note that bulk strings may be further encoded and decoded, e.g. with a wide multi-byte encoding, by the client. 
- +Aggregates, such as Arrays and Maps, can have varying numbers of sub-elements and nesting levels. -## RESP Simple Strings +The following table summarizes the RESP data types that Redis supports: -Simple Strings are encoded as follows: a plus character, followed by a string that cannot contain a CR or LF character (no newlines are allowed), and terminated by CRLF (that is "\r\n"). +| RESP data type | Minimal protocol version | Category | First byte | +| --- | --- | --- | --- | +| [Simple strings](#simple-strings) | RESP2 | Simple | `+` | +| [Simple Errors](#simple-errors) | RESP2 | Simple | `-` | +| [Integers](#integers) | RESP2 | Simple | `:` | +| [Bulk strings](#bulk-strings) | RESP2 | Aggregate | `$` | +| [Arrays](#arrays) | RESP2 | Aggregate | `*` | +| [Nulls](#nulls) | RESP3 | Simple | `_` | +| [Booleans](#booleans) | RESP3 | Simple | `#` | +| [Doubles](#doubles) | RESP3 | Simple | `,` | +| [Big numbers](#big-numbers) | RESP3 | Simple | `(` | +| [Bulk errors](#bulk-errors) | RESP3 | Aggregate | `!` | +| [Verbatim strings](#verbatim-strings) | RESP3 | Aggregate | `=` | +| [Maps](#maps) | RESP3 | Aggregate | `%` | +| [Sets](#sets) | RESP3 | Aggregate | `~` | +| [Pushes](#pushes) | RESP3 | Aggregate | `>` | -Simple Strings are used to transmit non binary-safe strings with minimal overhead. For example, many Redis commands reply with just "OK" on success. The RESP Simple String is encoded with the following 5 bytes: + - "+OK\r\n" +### Simple strings +Simple strings are encoded as a plus (`+`) character, followed by a string. +The string mustn't contain a CR (`\r`) or LF (`\n`) character and is terminated by CRLF (i.e., `\r\n`). -In order to send binary-safe strings, use RESP Bulk Strings instead. +Simple strings transmit short, non-binary strings with minimal overhead. +For example, many Redis commands reply with just "OK" on success. 
+The encoding of this Simple String is the following 5 bytes: -When Redis replies with a Simple String, a client library should respond with a string composed of the first character after the '+' -up to the end of the string, excluding the final CRLF bytes. + +OK\r\n - +When Redis replies with a simple string, a client library should return to the caller a string value composed of the first character after the `+` up to the end of the string, excluding the final CRLF bytes. -## RESP Errors +To send binary strings, use [bulk strings](#bulk-strings) instead. -RESP has a specific data type for errors. They are similar to -RESP Simple Strings, but the first character is a minus '-' character instead -of a plus. The real difference between Simple Strings and Errors in RESP is that clients treat errors -as exceptions, and the string that composes -the Error type is the error message itself. + + +### Simple errors +RESP has specific data types for errors. +Simple errors, or simply just errors, are similar to [simple strings](#simple-strings), but their first character is the minus (`-`) character. +The difference between simple strings and errors in RESP is that clients should treat errors as exceptions, whereas the string encoded in the error type is the error message itself. The basic format is: - "-Error message\r\n" + -Error message\r\n -Error replies are only sent when something goes wrong, for instance if -you try to perform an operation against the wrong data type, or if the command -does not exist. The client should raise an exception when it receives an Error reply. +Redis replies with an error only when something goes wrong, for example, when you try to operate against the wrong data type, or when the command does not exist. +The client should raise an exception when it receives an Error reply. 
The following are examples of error replies: - -ERR unknown command 'helloworld' + -ERR unknown command 'asdf' -WRONGTYPE Operation against a key holding the wrong kind of value -The first word after the "-", up to the first space or newline, represents -the kind of error returned. This is just a convention used by Redis and is not -part of the RESP Error format. +The first upper-case word after the `-`, up to the first space or newline, represents the kind of error returned. +This word is called an _error prefix_. +Note that the error prefix is a convention used by Redis rather than part of the RESP error type. -For example, `ERR` is the generic error, while `WRONGTYPE` is a more specific -error that implies that the client tried to perform an operation against the -wrong data type. This is called an **Error Prefix** and is a way to allow -the client to understand the kind of error returned by the server without checking the exact error message. +For example, in Redis, `ERR` is a generic error, whereas `WRONGTYPE` is a more specific error that implies that the client attempted an operation against the wrong data type. +The error prefix allows the client to understand the type of error returned by the server without checking the exact error message. -A client implementation may return different types of exceptions for different -errors or provide a generic way to trap errors by directly providing -the error name to the caller as a string. +A client implementation can return different types of exceptions for various errors, or provide a generic way for trapping errors by directly providing the error name to the caller as a string. -However, such a feature should not be considered vital as it is rarely useful, and a limited client implementation may simply return a generic error condition, such as `false`. +However, such a feature should not be considered vital as it is rarely useful. 
+Also, simpler client implementations can return a generic error value, such as `false`. -## RESP Integers +### Integers +This type is a CRLF-terminated string that represents a signed, base-10, 64-bit integer. -This type is just a CRLF-terminated string that represents an integer, -prefixed by a ":" byte. For example, ":0\r\n" and ":1000\r\n" are integer replies. +RESP encodes integers in the following way: -Many Redis commands return RESP Integers, like `INCR`, `LLEN`, and `LASTSAVE`. + :[<+|->]<value>\r\n -There is no special meaning for the returned integer. It is just an -incremental number for `INCR`, a UNIX time for `LASTSAVE`, and so forth. However, -the returned integer is guaranteed to be in the range of a signed 64-bit integer. +* The colon (`:`) as the first byte. +* An optional plus (`+`) or minus (`-`) as the sign. +* One or more decimal digits (`0`..`9`) as the integer's unsigned, base-10 value. +* The CRLF terminator. -Integer replies are also used in order to return true or false. -For instance, commands like `EXISTS` or `SISMEMBER` will return 1 for true -and 0 for false. +For example, `:0\r\n` and `:1000\r\n` are integer replies (of zero and one thousand, respectively). -Other commands like `SADD`, `SREM`, and `SETNX` will return 1 if the operation -was actually performed and 0 otherwise. +Many Redis commands return RESP integers, including `INCR`, `LLEN`, and `LASTSAVE`. +An integer, by itself, has no special meaning other than in the context of the command that returned it. +For example, it is an incremental number for `INCR`, a UNIX timestamp for `LASTSAVE`, and so forth. +However, the returned integer is guaranteed to be in the range of a signed 64-bit integer. -The following commands will reply with an integer: `SETNX`, `DEL`, -`EXISTS`, `INCR`, `INCRBY`, `DECR`, `DECRBY`, `DBSIZE`, `LASTSAVE`, -`RENAMENX`, `MOVE`, `LLEN`, `SADD`, `SREM`, `SISMEMBER`, `SCARD`. +In some cases, integers can represent true and false Boolean values. 
+For instance, `SISMEMBER` returns 1 for true and 0 for false. + +Other commands, including `SADD`, `SREM`, and `SETNX`, return 1 when the data changes and 0 otherwise. - -## RESP Bulk Strings +### Bulk strings +A bulk string represents a single binary string. +The string can be of any size, but by default, Redis limits it to 512 MB (see the `proto-max-bulk-len` configuration directive). -Bulk Strings are used in order to represent a single binary-safe -string up to 512 MB in length. +RESP encodes bulk strings in the following way: -Bulk Strings are encoded in the following way: + $<length>\r\n<data>\r\n -* A "$" byte followed by the number of bytes composing the string (a prefixed length), terminated by CRLF. -* The actual string data. +* The dollar sign (`$`) as the first byte. +* One or more decimal digits (`0`..`9`) as the string's length, in bytes, as an unsigned, base-10 value. +* The CRLF terminator. +* The data. * A final CRLF. So the string "hello" is encoded as follows: - "$5\r\nhello\r\n" + $5\r\nhello\r\n -An empty string is encoded as: +The empty string's encoding is: - "$0\r\n\r\n" + $0\r\n\r\n -RESP Bulk Strings can also be used in order to signal non-existence of a value -using a special format to represent a Null value. In this -format, the length is -1, and there is no data. Null is represented as: + + +#### Null bulk strings +Whereas RESP3 has a dedicated data type for [null values](#nulls), RESP2 has no such type. +Instead, due to historical reasons, the representation of null values in RESP2 is via predetermined forms of the [bulk strings](#bulk-strings) and [arrays](#arrays) types. - "$-1\r\n" +The null bulk string represents a non-existing value. +The `GET` command returns the Null Bulk String when the target key doesn't exist. -This is called a **Null Bulk String**. 
+It is encoded as a bulk string with the length of negative one (-1), like so: + + $-1\r\n -The client library API should not return an empty string, but a nil object, -when the server replies with a Null Bulk String. -For example, a Ruby library should return 'nil' while a C library should -return NULL (or set a special flag in the reply object). +A Redis client should return a nil object when the server replies with a null bulk string rather than the empty string. +For example, a Ruby library should return `nil` while a C library should return `NULL` (or set a special flag in the reply object). -## RESP Arrays +### Arrays +Clients send commands to the Redis server as RESP arrays. +Similarly, some Redis commands that return collections of elements use arrays as their replies. +An example is the `LRANGE` command that returns elements of a list. -Clients send commands to the Redis server using RESP Arrays. Similarly, -certain Redis commands, that return collections of elements to the client, -use RESP Arrays as their replies. An example is the `LRANGE` command that -returns elements of a list. +RESP Arrays' encoding uses the following format: -RESP Arrays are sent using the following format: + *<number-of-elements>\r\n<element-1>...<element-n> -* A `*` character as the first byte, followed by the number of elements in the array as a decimal number, followed by CRLF. -* An additional RESP type for every element of the Array. +* An asterisk (`*`) as the first byte. +* One or more decimal digits (`0`..`9`) as the number of elements in the array as an unsigned, base-10 value. +* The CRLF terminator. +* An additional RESP type for every element of the array. 
So an empty Array is just the following: - "*0\r\n" + *0\r\n -While an array of two RESP Bulk Strings "hello" and "world" is encoded as: +Whereas the encoding of an array consisting of the two bulk strings "hello" and "world" is: - "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n" + *2\r\n$5\r\nhello\r\n$5\r\nworld\r\n -As you can see after the `*CRLF` part prefixing the array, the other -data types composing the array are just concatenated one after the other. +As you can see, after the `*CRLF` part prefixing the array, the other data types that compose the array are concatenated one after the other. For example, an Array of three integers is encoded as follows: - "*3\r\n:1\r\n:2\r\n:3\r\n" + *3\r\n:1\r\n:2\r\n:3\r\n -Arrays can contain mixed types, so it's not necessary for the -elements to be of the same type. For instance, a list of four -integers and a bulk string can be encoded as follows: +Arrays can contain mixed data types. +For instance, the following encoding is of a list of four integers and a bulk string: *5\r\n :1\r\n @@ -218,28 +278,14 @@ integers and a bulk string can be encoded as follows: $5\r\n hello\r\n -(The reply was split into multiple lines for clarity). +(The raw RESP encoding is split into multiple lines for readability). -The first line the server sent is `*5\r\n` in order to specify that five -replies will follow. Then every reply constituting the items of the -Multi Bulk reply are transmitted. +The first line the server sent is `*5\r\n`. +This numeric value tells the client that five reply types are about to follow it. +Then, every successive reply constitutes an element in the array. -Null Arrays exist as well and are an alternative way to -specify a Null value (usually the Null Bulk String is used, but for historical -reasons we have two formats). 
- -For instance, when the `BLPOP` command times out, it returns a Null Array -that has a count of `-1` as in the following example: - - "*-1\r\n" - -A client library API should return a null object and not an empty Array when -Redis replies with a Null Array. This is necessary to distinguish -between an empty list and a different condition (for instance the timeout -condition of the `BLPOP` command). - -Nested arrays are possible in RESP. For example a nested array of two arrays -is encoded as follows: +All of the aggregate RESP types support nesting. +For example, a nested array of two arrays is encoded as follows: *2\r\n *3\r\n @@ -250,16 +296,38 @@ is encoded as follows: +Hello\r\n -World\r\n -(The format was split into multiple lines to make it easier to read). +(The raw RESP encoding is split into multiple lines for readability). + +The above encodes a two-element array. +The first element is an array that, in turn, contains three integers (1, 2, 3). +The second element is another array containing a simple string and an error. + +{{% alert title="Multi bulk reply" color="info" %}} +In some places, the RESP Array type may be referred to as _multi bulk_. +The two are the same. +{{% /alert %}} + + + +#### Null arrays +Whereas RESP3 has a dedicated data type for [null values](#nulls), RESP2 has no such type. Instead, due to historical reasons, the representation of null values in RESP2 is via predetermined forms of the [Bulk Strings](#bulk-strings) and [arrays](#arrays) types. -The above RESP data type encodes a two-element Array consisting of an Array that contains three Integers (1, 2, 3) and an array of a Simple String and an Error. +Null arrays exist as an alternative way of representing a null value. +For instance, when the `BLPOP` command times out, it returns a null array. -## Null elements in Arrays +The encoding of a null array is that of an array with the length of -1, i.e.: -Single elements of an Array may be Null. 
This is used in Redis replies to signal that these elements are missing and not empty strings. This -can happen with the SORT command when used with the GET _pattern_ option -if the specified key is missing. Example of an Array reply containing a -Null element: + *-1\r\n + +When Redis replies with a null array, the client should return a null object rather than an empty array. +This is necessary to distinguish between an empty list and a different condition (for instance, the timeout condition of the `BLPOP` command). + +#### Null elements in arrays +Single elements of an array may be [null bulk string](#null-bulk-strings). +This is used in Redis replies to signal that these elements are missing and not empty strings. This can happen, for example, with the `SORT` command when used with the `GET pattern` option +if the specified key is missing. + +Here's an example of an array reply containing a null element: *3\r\n $5\r\n @@ -268,25 +336,288 @@ Null element: $5\r\n world\r\n -The second element is a Null. The client library should return something -like this: +Above, the second element is null. +The client library should return to its caller something like this: ["hello",nil,"world"] -Note that this is not an exception to what was said in the previous sections, but -an example to further specify the protocol. + + +### Nulls +The null data type represents non-existent values. + +Nulls' encoding is the underscore (`_`) character, followed by the CRLF terminator (`\r\n`). +Here's Null's raw RESP encoding: + + _\r\n + +{{% alert title="Null Bulk String, Null Arrays and Nulls" color="info" %}} +Due to historical reasons, RESP2 features two specially crafted values for representing null values of bulk strings and arrays. +This duality has always been a redundancy that added zero semantical value to the protocol itself. + +The null type, introduced in RESP3, aims to fix this wrong. 
+{{% /alert %}} + + + +### Booleans +RESP booleans are encoded as follows: + + #<t|f>\r\n + +* The octothorpe character (`#`) as the first byte. +* A `t` character for true values, or an `f` character for false ones. +* The CRLF terminator. + + + +### Doubles +The Double RESP type encodes a double-precision floating point value. +Doubles are encoded as follows: + + ,[<+|->]<integral>[.<fractional>][<E|e>[sign]<exponent>]\r\n + +* The comma character (`,`) as the first byte. +* An optional plus (`+`) or minus (`-`) as the sign. +* One or more decimal digits (`0`..`9`) as an unsigned, base-10 integral value. +* An optional dot (`.`), followed by one or more decimal digits (`0`..`9`) as an unsigned, base-10 fractional value. +* An optional capital or lowercase letter E (`E` or `e`), followed by an optional plus (`+`) or minus (`-`) as the exponent's sign, ending with one or more decimal digits (`0`..`9`) as an unsigned, base-10 exponent value. +* The CRLF terminator. + +Here's the encoding of the number 1.23: + + ,1.23\r\n + +Because the fractional part is optional, the integer value of ten (10) can, therefore, be RESP-encoded both as an integer as well as a double: + + :10\r\n + ,10\r\n + +In such cases, the Redis client should return native integer and double values, respectively, providing that these types are supported by the language of its implementation. + +The positive infinity, negative infinity and NaN values are encoded as follows: + + ,inf\r\n + ,-inf\r\n + ,nan\r\n + + + +### Big numbers +This type can encode integer values outside the range of signed 64-bit integers. + +Big numbers use the following encoding: + + ([+|-]<number>\r\n + +* The left parenthesis character (`(`) as the first byte. +* An optional plus (`+`) or minus (`-`) as the sign. +* One or more decimal digits (`0`..`9`) as an unsigned, base-10 value. +* The CRLF terminator. + +Example: + + (3492890328409238509324850943850943825024385\r\n + +Big numbers can be positive or negative but can't include fractionals. 
+Client libraries written in languages with a big number type should return a big number. +When big numbers aren't supported, the client should return a string and, when possible, signal to the caller that the reply is a big integer (depending on the API used by the client library). + + + +### Bulk errors +This type combines the purpose of [simple errors](#simple-errors) with the expressive power of [bulk strings](#bulk-strings). + +It is encoded as: + + !<length>\r\n<error>\r\n + +* An exclamation mark (`!`) as the first byte. +* One or more decimal digits (`0`..`9`) as the error's length, in bytes, as an unsigned, base-10 value. +* The CRLF terminator. +* The error itself. +* A final CRLF. + +As a convention, the error begins with an uppercase (space-delimited) word that conveys the error message. + +For instance, the error "SYNTAX invalid syntax" is represented by the following protocol encoding: -## Send commands to a Redis server + !21\r\n + SYNTAX invalid syntax\r\n -Now that you are familiar with the RESP serialization format, you can use it to help write a Redis client library. We can further specify -how the interaction between the client and the server works: +(The raw RESP encoding is split into multiple lines for readability). -* A client sends the Redis server a RESP Array consisting of only Bulk Strings. + + +### Verbatim strings +This type is similar to the [bulk string](#bulk-strings), with the addition of providing a hint about the data's encoding. + +A verbatim string's RESP encoding is as follows: + + =<length>\r\n<encoding>:<data>\r\n + +* An equal sign (`=`) as the first byte. +* One or more decimal digits (`0`..`9`) as the string's total length, in bytes, as an unsigned, base-10 value. +* The CRLF terminator. +* Exactly three (3) bytes represent the data's encoding. +* The colon (`:`) character separates the encoding and data. +* The data. +* A final CRLF. + +Example: + + =15\r\n + txt:Some string\r\n + +(The raw RESP encoding is split into multiple lines for readability). 
+ +Some client libraries may ignore the difference between this type and the string type and return a native string in both cases. +However, interactive clients, such as command line interfaces (e.g., [`redis-cli`](/docs/manual/cli)), can use this type and know that their output should be presented to the human user as is and without quoting the string. + +For example, the Redis command `INFO` outputs a report that includes newlines. +When using RESP3, `redis-cli` displays it correctly because it is sent as a Verbatim String reply (with its three bytes being "txt"). +When using RESP2, however, the `redis-cli` is hard-coded to look for the `INFO` command to ensure its correct display to the user. + + + +### Maps +The RESP map encodes a collection of key-value tuples, i.e., a dictionary or a hash. + +It is encoded as follows: + + %<number-of-entries>\r\n<key-1><value-1>...<key-n><value-n> + +* A percent character (`%`) as the first byte. +* One or more decimal digits (`0`..`9`) as the number of entries, or key-value tuples, in the map as an unsigned, base-10 value. +* The CRLF terminator. +* Two additional RESP types for every key and value in the map. + +For example, the following JSON object: + + { + "first": 1, + "second": 2 + } + +Can be encoded in RESP like so: + + %2\r\n + +first\r\n + :1\r\n + +second\r\n + :2\r\n + +(The raw RESP encoding is split into multiple lines for readability). + +Both map keys and values can be any of RESP's types. + +Redis clients should return the idiomatic dictionary type that their language provides. +However, low-level programming languages (such as C, for example) will likely return an array along with type information that indicates to the caller that it is a dictionary. + +{{% alert title="Map pattern in RESP2" color="info" %}} +RESP2 doesn't have a map type. +A map in RESP2 is represented by a flat array containing the keys and the values. 
+The first element is a key, followed by the corresponding value, then the next key and so on, like this: +`key1, value1, key2, value2, ...`. +{{% /alert %}} + + + +### Sets +Sets are somewhat like [Arrays](#arrays) but are unordered and should only contain unique elements. + +RESP set's encoding is: + + ~<number-of-elements>\r\n<element-1>...<element-n> + +* A tilde (`~`) as the first byte. +* One or more decimal digits (`0`..`9`) as the number of elements in the set as an unsigned, base-10 value. +* The CRLF terminator. +* An additional RESP type for every element of the Set. + +Clients should return the native set type if it is available in their programming language. +Alternatively, in the absence of a native set type, an array coupled with type information can be used (in C, for example). + + + +### Pushes +RESP's pushes contain out-of-band data. +They are an exception to the protocol's request-response model and provide a generic _push mode_ for connections. + +Push events are encoded similarly to [arrays](#arrays), differing only in their first byte: + + ><number-of-elements>\r\n<element-1>...<element-n> + +* A greater-than sign (`>`) as the first byte. +* One or more decimal digits (`0`..`9`) as the number of elements in the message as an unsigned, base-10 value. +* The CRLF terminator. +* An additional RESP type for every element of the push event. + +Pushed data may precede or follow any of RESP's data types but never inside them. +That means a client won't find push data in the middle of a map reply, for example. +It also means that pushed data may appear before or after a command's reply, as well as by itself (without calling any command). + +Clients should react to pushes by invoking a callback that implements their handling of the pushed data. + +## Client handshake +New RESP connections should begin the session by calling the `HELLO` command. +This practice accomplishes two things: + +1. It allows servers to be backward compatible with RESP2 versions. 
+ This is needed in Redis to make the transition to version 3 of the protocol gentler. +2. The `HELLO` command returns information about the server and the protocol that the client can use for different goals. + +The `HELLO` command has the following high-level syntax: + + HELLO <protocol-version> [optional-arguments] + +The first argument of the command is the protocol version we want the connection to be set. +By default, the connection starts in RESP2 mode. +If we specify a connection version that is too big and unsupported by the server, it should reply with a `-NOPROTO` error. Example: + + Client: HELLO 4 + Server: -NOPROTO sorry, this protocol version is not supported. + +At that point, the client may retry with a lower protocol version. + +Similarly, the client can easily detect a server that is only able to speak RESP2: + + Client: HELLO 3 + Server: -ERR unknown command 'HELLO' + +The client can then proceed and use RESP2 to communicate with the server. + +Note that even if the protocol's version is supported, the `HELLO` command may return an error, perform no action and remain in RESP2 mode. +For example, when used with invalid authentication credentials in the command's optional `!AUTH` clause: + + Client: HELLO 3 AUTH default mypassword + Server: -ERR invalid password + (the connection remains in RESP2 mode) + +A successful reply to the `HELLO` command is a map reply. +The information in the reply is partly server-dependent, but certain fields are mandatory for all the RESP3 implementations: +* **server**: "redis" (or other software name). +* **version**: the server's version. +* **proto**: the highest supported version of the RESP protocol. + +In Redis' RESP3 implementation, the following fields are also emitted: + +* **id**: the connection's identifier (ID). +* **mode**: "standalone", "sentinel" or "cluster". +* **role**: "master" or "replica". +* **modules**: list of loaded modules as an Array of Bulk Strings. 
+ +## Sending commands to a Redis server +Now that you are familiar with the RESP serialization format, you can use it to help write a Redis client library. +We can further specify how the interaction between the client and the server works: + +* A client sends the Redis server an [array](#arrays) consisting of only bulk strings. * A Redis server replies to clients, sending any valid RESP data type as a reply. -So for example a typical interaction could be the following. +So, for example, a typical interaction could be the following. -The client sends the command **LLEN mylist** in order to get the length of the list stored at key *mylist*. Then the server replies with an Integer reply as in the following example (C: is the client, S: the server). +The client sends the command `LLEN mylist` to get the length of the list stored at the key _mylist_. +Then the server replies with an [integer](#integers) reply as in the following example (`C:` is the client, `S:` the server). C: *2\r\n C: $4\r\n @@ -299,51 +630,44 @@ The client sends the command **LLEN mylist** in order to get the length of the l As usual, we separate different parts of the protocol with newlines for simplicity, but the actual interaction is the client sending `*2\r\n$4\r\nLLEN\r\n$6\r\nmylist\r\n` as a whole. ## Multiple commands and pipelining - -A client can use the same connection in order to issue multiple commands. -Pipelining is supported so multiple commands can be sent with a single -write operation by the client, without the need to read the server reply -of the previous command before issuing the next one. +A client can use the same connection to issue multiple commands. +Pipelining is supported, so multiple commands can be sent with a single write operation by the client. +The client can skip reading replies and continue to send the commands one after the other. All the replies can be read at the end. For more information, see [Pipelining](/topics/pipelining). 
## Inline commands +Sometimes you may need to send a command to the Redis server but only have `telnet` available. +While the Redis protocol is simple to implement, it is not ideal for interactive sessions, and `redis-cli` may not always be available. +For this reason, Redis also accepts commands in the _inline command_ format. -Sometimes you may need to send a command -to the Redis server but only have `telnet` available. While the Redis protocol is simple to implement, it is -not ideal to use in interactive sessions, and `redis-cli` may not always be -available. For this reason, Redis also accepts commands in the **inline command** format. - -The following is an example of a server/client chat using an inline command -(the server chat starts with S:, the client chat with C:) +The following example demonstrates a server/client exchange using an inline command (the server chat starts with `S:`, the client chat with `C:`): C: PING S: +PONG -The following is an example of an inline command that returns an integer: +Here's another example of an inline command where the server returns an integer: C: EXISTS somekey S: :0 -Basically, you write space-separated arguments in a telnet session. -Since no command starts with `*` that is instead used in the unified request -protocol, Redis is able to detect this condition and parse your command. +Basically, to issue an inline command, you write space-separated arguments in a telnet session. +Since no command starts with `*` (the identifying byte of RESP Arrays), Redis detects this condition and parses your command inline. -## High performance parser for the Redis protocol +## High-performance parser for the Redis protocol -While the Redis protocol is human readable and easy to implement, it can -be implemented with a performance similar to that of a binary protocol. +While the Redis protocol is human-readable and easy to implement, its implementation can exhibit performance similar to that of a binary protocol. 
-RESP uses prefixed lengths to transfer bulk data, so there is -never a need to scan the payload for special characters, like with JSON, nor to quote the payload that needs to be sent to the -server. +RESP uses prefixed lengths to transfer bulk data. +That makes scanning the payload for special characters unnecessary (unlike parsing JSON, for example). +For the same reason, quoting and escaping the payload isn't needed. -The Bulk and Multi Bulk lengths can be processed with code that performs -a single operation per character while at the same time scanning for the -CR character, like the following C code: +Reading the length of aggregate types (for example, bulk strings or arrays) can be processed with code that performs a single operation per character while at the same time scanning for the CR character. -``` +Example (in C): + +```c #include int main(void) { @@ -362,11 +686,17 @@ int main(void) { } ``` -After the first CR is identified, it can be skipped along with the following -LF without any processing. Then the bulk data can be read using a single -read operation that does not inspect the payload in any way. Finally, -the remaining CR and LF characters are discarded without any processing. +After the first CR is identified, it can be skipped along with the following LF without further processing. +Then, the bulk data can be read with a single read operation that doesn't inspect the payload in any way. +Finally, the remaining CR and LF characters are discarded without additional processing. + +While comparable in performance to a binary protocol, the Redis protocol is significantly more straightforward to implement in most high-level languages, reducing the number of bugs in client software. + +## Tips for Redis client authors + +* For testing purposes, use [Lua's type conversions](/topics/lua-api#lua-to-resp3-type-conversion) to have Redis reply with any RESP2/RESP3 needed. 
+ As an example, a RESP3 double can be generated like so: + ``` + EVAL "return { double = tonumber(ARGV[1]) }" 0 1e0 + ``` -While comparable in performance to a binary protocol, the Redis protocol is -significantly simpler to implement in most high-level languages, -reducing the number of bugs in client software. diff --git a/docs/reference/signals.md b/docs/reference/signals.md index 4a63ce3c0b..1aae7d5116 100644 --- a/docs/reference/signals.md +++ b/docs/reference/signals.md @@ -1,7 +1,7 @@ --- title: "Redis signal handling" linkTitle: "Signal handling" -weight: 1 +weight: 8 description: How Redis handles common Unix signals aliases: - /topics/signals diff --git a/languages.json b/languages.json index d7a8b26be0..45bf6c212d 100644 --- a/languages.json +++ b/languages.json @@ -1,6 +1,7 @@ { "ActionScript": "actionscript", "ActiveX/COM+": "activex-com", + "Ballerina": "ballerina", "Bash": "bash", "Boomi": "boomi", "C": "c", @@ -12,11 +13,13 @@ "D": "d", "Dart": "dart", "Delphi": "delphi", + "Deno": "deno", "Elixir": "elixir", "emacs lisp": "emacs-lisp", "Erlang": "erlang", "Fancy": "fancy", "gawk": "gawk", + "Gleam": "gleam", "GNU Prolog": "gnu-prolog", "Go": "go", "Haskell": "haskell", diff --git a/libraries/c/github.com/redis/librdb.json b/libraries/c/github.com/redis/librdb.json new file mode 100644 index 0000000000..23818714d8 --- /dev/null +++ b/libraries/c/github.com/redis/librdb.json @@ -0,0 +1,5 @@ +{ + "name": "librdb", + "description": "Redis RDB file parser, with JSON, RESP and RDB-loader extensions", + "recommended": true +} diff --git a/libraries/go/github.com/go-redsync/redsync.json b/libraries/go/github.com/go-redsync/redsync.json new file mode 100644 index 0000000000..f743258688 --- /dev/null +++ b/libraries/go/github.com/go-redsync/redsync.json @@ -0,0 +1,8 @@ +{ + "name": "Redsync", + "description": "Distributed mutual exclusion lock using Redis for Go; Redsync 
implements the Redlock algorithm.", + "homepage": "https://github.com/go-redsync/redsync", + "github": [ + "hjr265" + ] +} diff --git a/libraries/labview/decibel.ni.com/content/docs/DOC-36322.json b/libraries/labview/decibel.ni.com/content/docs/DOC-36322.json deleted file mode 100644 index 75c8802b74..0000000000 --- a/libraries/labview/decibel.ni.com/content/docs/DOC-36322.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "redis-in-labview", - "description": "LabVIEW toolkit for Redis", - "twitter": [ - "iwac" - ] -} \ No newline at end of file diff --git a/libraries/php/github.com/RobiNN1/phpCacheAdmin.json b/libraries/php/github.com/RobiNN1/phpCacheAdmin.json new file mode 100644 index 0000000000..67ec634b1b --- /dev/null +++ b/libraries/php/github.com/RobiNN1/phpCacheAdmin.json @@ -0,0 +1,4 @@ +{ + "name": "phpCacheAdmin", + "description": "A web dashboard for your favorite caching system." +} diff --git a/libraries/python/github.com/lsbardel/python-stdnet.json b/libraries/python/github.com/lsbardel/python-stdnet.json deleted file mode 100644 index 6d8007d161..0000000000 --- a/libraries/python/github.com/lsbardel/python-stdnet.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "Stdnet", - "description": "Redis data manager with advanced query and search API.", - "twitter": [ - "lsbardel" - ] -} \ No newline at end of file diff --git a/libraries/ruby/github.com/carlhoerberg/meerkat.json b/libraries/ruby/github.com/carlhoerberg/meerkat.json index f114d9d99d..d4bc7069ce 100644 --- a/libraries/ruby/github.com/carlhoerberg/meerkat.json +++ b/libraries/ruby/github.com/carlhoerberg/meerkat.json @@ -1,7 +1,6 @@ { "name": "Meerkat", "description": "Rack middleware for Server Sent Events with multiple backends.", - "homepage": "https://carlhoerberg.github.io/meerkat/", "twitter": [ "carlhoerberg" ] diff --git 
a/libraries/typescript/github.com/redis/redis-om-node.json b/libraries/typescript/github.com/redis/redis-om-node.json index 538cd8b23f..544fae76a0 100644 --- a/libraries/typescript/github.com/redis/redis-om-node.json +++ b/libraries/typescript/github.com/redis/redis-om-node.json @@ -1,5 +1,5 @@ { - "name": "Redis OM for Python", - "description": "Object mapping, and more, for Redis and Node.js.", + "name": "Redis OM for Node.js", + "description": "Object mapping, and more, for Redis and Node.js. Written in TypeScript.", "recommended": true -} \ No newline at end of file +} diff --git a/modules/community/github.com/Clement-Jean/RedisIMS.json b/modules/community/github.com/Clement-Jean/RedisIMS.json deleted file mode 100644 index 6725b620df..0000000000 --- a/modules/community/github.com/Clement-Jean/RedisIMS.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "name": "redisims", - "license": "MIT", - "description": "A lightweight Redis module following the If Modified Since (IMS) pattern for caching", - "github": [ - "Clement-Jean" - ] -} \ No newline at end of file diff --git a/modules/community/github.com/FalkorDB/FalkorDB.json b/modules/community/github.com/FalkorDB/FalkorDB.json new file mode 100644 index 0000000000..dac23f5ead --- /dev/null +++ b/modules/community/github.com/FalkorDB/FalkorDB.json @@ -0,0 +1,8 @@ +{ + "name": "FalkorDB", + "license": "SSPL", + "description": "A graph database with a Cypher-based querying language using sparse adjacency matrices", + "github": [ + "FalkorDB" + ] +} diff --git a/modules/community/github.com/RedisLabsModules/redis-state-machine.json b/modules/community/github.com/RedisLabsModules/redis-state-machine.json new file mode 100644 index 0000000000..d25220766d --- /dev/null +++ b/modules/community/github.com/RedisLabsModules/redis-state-machine.json @@ 
-0,0 +1,9 @@ +{ + "name": "RedisStateMachine", + "license": "Redis Source Available License", + "description": "A module for storing state machines, and transitioning them in Redis", + "github": [ + "chayim", + "RedisLabs" + ] +} diff --git a/modules/community/github.com/dahomey-technologies/RedisCBOR.json b/modules/community/github.com/dahomey-technologies/RedisCBOR.json new file mode 100644 index 0000000000..db5f34edd0 --- /dev/null +++ b/modules/community/github.com/dahomey-technologies/RedisCBOR.json @@ -0,0 +1,8 @@ +{ + "name": "RedisCBOR", + "license": "MIT", + "description": "A CBOR data type for Redis", + "github": [ + "dahomey-technologies" + ] +} diff --git a/modules/community/github.com/ekzhang/redis-rope.json b/modules/community/github.com/ekzhang/redis-rope.json new file mode 100644 index 0000000000..ef387bfc47 --- /dev/null +++ b/modules/community/github.com/ekzhang/redis-rope.json @@ -0,0 +1,8 @@ +{ + "name": "redis-rope", + "license": "MIT", + "description": "A native data type for manipulating large strings up to exponentially faster, based on splay trees.", + "github": [ + "ekzhang" + ] +} \ No newline at end of file diff --git a/modules/community/github.com/sewenew/redis-llm.json b/modules/community/github.com/sewenew/redis-llm.json new file mode 100644 index 0000000000..bf82312fd1 --- /dev/null +++ b/modules/community/github.com/sewenew/redis-llm.json @@ -0,0 +1,8 @@ +{ + "name": "redis-llm", + "license": "Apache-2.0", + "description": "Redis module integrating LLM (Large Language Model) with Redis", + "github": [ + "sewenew" + ] +} diff --git a/modules/community/github.com/t-fuchi/RedisMMap.json b/modules/community/github.com/t-fuchi/RedisMMap.json new file mode 100644 index 0000000000..91705768e7 --- /dev/null +++ b/modules/community/github.com/t-fuchi/RedisMMap.json @@ -0,0 
+1,8 @@ +{ + "name": "RedisMMap", + "license": "MIT", + "description": "Redis module for accessing values on mmapped file.", + "github": [ + "t-fuchi" + ] +} \ No newline at end of file diff --git a/modules/community/github.com/tzongw/redis-timer.json b/modules/community/github.com/tzongw/redis-timer.json new file mode 100644 index 0000000000..da65c77f17 --- /dev/null +++ b/modules/community/github.com/tzongw/redis-timer.json @@ -0,0 +1,8 @@ +{ + "name": "redis-timer", + "license": "MIT", + "description": "A module allows the delayed execution of LUA scripts, both periodic and one-time, supports replication and cluster.", + "github": [ + "tzongw" + ] +} diff --git a/resources/_index.md b/resources/_index.md new file mode 100644 index 0000000000..ba4bf01a2b --- /dev/null +++ b/resources/_index.md @@ -0,0 +1,8 @@ +--- +title: Resources +linkTitle: Resources +description: Open-source and source-available projects for connecting, using, managing, and extending Redis. +type: docs +--- + +The following sections are lists of open-source and source-available projects. diff --git a/resources/clients/index.md b/resources/clients/index.md new file mode 100644 index 0000000000..23ed93d657 --- /dev/null +++ b/resources/clients/index.md @@ -0,0 +1,12 @@ +--- +title: "Clients" +linkTitle: "Clients" +weight: 10 +description: Implementations of the Redis protocol in different programming languages. To get started with an official client, click on one of the quickstart guide links below. 
+layout: bazzar +bazzar: clients +aliases: + - /resources/clients + - /resources/clients/ +--- + diff --git a/resources/libraries/index.md b/resources/libraries/index.md new file mode 100644 index 0000000000..711891a988 --- /dev/null +++ b/resources/libraries/index.md @@ -0,0 +1,11 @@ +--- +title: "Libraries" +linkTitle: "Libraries" +weight: 11 +description: Libraries that use Redis and can be used by applications +layout: bazzar +bazzar: libraries +aliases: + - /docs/libraries/ +--- + diff --git a/docs/modules/index.md b/resources/modules/index.md similarity index 56% rename from docs/modules/index.md rename to resources/modules/index.md index 9911c06691..adebb01500 100644 --- a/docs/modules/index.md +++ b/resources/modules/index.md @@ -2,11 +2,12 @@ title: "Modules" linkTitle: "Modules" weight: 13 -description: List of Redis modules +description: Redis modules extend the server's functionality in various ways layout: bazzar bazzar: modules aliases: - /modules - /modules/ + - /docs/modules/ --- diff --git a/docs/tools/index.md b/resources/tools/index.md similarity index 50% rename from docs/tools/index.md rename to resources/tools/index.md index 8d8c31424c..edb4575bd9 100644 --- a/docs/tools/index.md +++ b/resources/tools/index.md @@ -2,8 +2,10 @@ title: "Tools" linkTitle: "Tools" weight: 12 -description: A list of tools for Redis +description: Tools for managing and deploying Redis layout: bazzar bazzar: tools +aliases: + - /docs/tools/ --- diff --git a/resp2_replies.json b/resp2_replies.json new file mode 100644 index 0000000000..33abc6c5cb --- /dev/null +++ b/resp2_replies.json @@ -0,0 +1,1320 @@ +{ + "ACL": [], + "ACL CAT": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements representing ACL categories or commands in a given category.", + "* [Simple error reply](/docs/reference/protocol-spec#simple-errors): the command returns an error if an 
invalid category name is given." + ], + "ACL DELUSER": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of users that were deleted. This number will not always match the number of arguments since certain users may not exist." + ], + "ACL DRYRUN": [ + "Any of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` on success.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): an error describing why the user can't execute the command." + ], + "ACL GENPASS": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): pseudorandom data. By default it contains 64 bytes, representing 256 bits of data. If `bits` was given, the output string length is the number of specified bits (rounded to the next multiple of 4) divided by 4." + ], + "ACL GETUSER": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of ACL rule definitions for the user.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if user does not exist." + ], + "ACL HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of subcommands and their descriptions." + ], + "ACL LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements." + ], + "ACL LOG": [ + "When called to show security events:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements representing ACL security events.", + "When called with `RESET`:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the security log was cleared." + ], + "ACL SAVE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`.", + "The command may fail with an error for several reasons: if the file cannot be written or if the server is not configured to use an external ACL file." 
+ ], + "ACL SETUSER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`.", + "If the rules contain errors, the error is returned." + ], + "ACL USERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): list of existing ACL users." + ], + "ACL WHOAMI": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the username of the current connection." + ], + "ACL-LOAD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` on success.", + "", + "The command may fail with an error for several reasons: if the file is not readable, if there is an error inside the file, and in such cases, the error will be reported to the user in the error.", + "Finally, the command will fail if the server is not configured to use an external ACL file." + ], + "APPEND": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the string after the append operation." + ], + "ASKING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "AUTH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`, or an error if the password, or username/password pair, is invalid." + ], + "BGREWRITEAOF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): a simple string reply indicating that the rewriting started or is about to start ASAP when the call is executed with success.", + "", + "The command may reply with an error in certain cases, as documented above." + ], + "BGSAVE": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `Background saving started`.", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `Background saving scheduled`." + ], + "BITCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of bits set to 1." 
+ ], + "BITFIELD": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): each entry being the corresponding result of the sub-command given at the same position.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if OVERFLOW FAIL was given and overflows or underflows are detected." + ], + "BITFIELD_RO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): each entry being the corresponding result of the sub-command given at the same position." + ], + "BITOP": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the size of the string stored in the destination key is equal to the size of the longest input string." + ], + "BITPOS": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the position of the first bit set to 1 or 0 according to the request", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1`. In case the `bit` argument is 1 and the string is empty or composed of just zero bytes", + "", + "If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned.", + "", + "If we look for clear bits (the bit argument is 0) and the string only contains bits set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value `0xff` the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1.", + "", + "The function considers the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**.", + "", + "However, this behavior changes if you are looking for clear bits and specify a range with both _start_ and _end_.", + "If a clear bit isn't found in the specified range, the function returns -1 as the user specified a clear range and there are no 0 bits in that range." 
+ ], + "BLMOVE": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped from the _source_ and pushed to the _destination_.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): the operation timed-out" + ], + "BLMPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when no element could be popped and the _timeout_ is reached.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a two-element array with the first element being the name of the key from which elements were popped, and the second element being an array of the popped elements." + ], + "BLPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): no element could be popped and the timeout expired", + "* [Array reply](/docs/reference/protocol-spec#arrays): the key from which the element was popped and the value of the popped element." + ], + "BRPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): no element could be popped and the timeout expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the key from which the element was popped and the value of the popped element" + ], + "BRPOPLPUSH": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped from _source_ and pushed to _destination_.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): the timeout is reached." + ], + "BZMPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when no element could be popped.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. 
Every entry in the elements array is also an array that contains the member and its score." + ], + "BZPOPMAX": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when no element could be popped and the _timeout_ expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the keyname, popped member, and its score." + ], + "BZPOPMIN": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when no element could be popped and the _timeout_ expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the keyname, popped member, and its score." + ], + "CLIENT": [], + "CLIENT CACHING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` or an error if the argument is not \"yes\" or \"no\"." + ], + "CLIENT GETNAME": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the connection name of the current connection.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): the connection name was not set." + ], + "CLIENT GETREDIR": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` when not redirecting notifications to any client.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if client tracking is not enabled.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the ID of the client to which notification are being redirected." + ], + "CLIENT HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of subcommands and their descriptions." + ], + "CLIENT ID": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the ID of the client." + ], + "CLIENT INFO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a unique string for the current client, as described at the `CLIENT LIST` page." 
+ ], + "CLIENT KILL": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when called in 3 argument format and the connection has been closed.", + "* [Integer reply](/docs/reference/protocol-spec#integers): when called in filter/value format, the number of clients killed." + ], + "CLIENT LIST": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): information and statistics about client connections." + ], + "CLIENT NO-EVICT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CLIENT NO-TOUCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CLIENT PAUSE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` or an error if the timeout is invalid." + ], + "CLIENT REPLY": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when called with `ON`. When called with either `OFF` or `SKIP` sub-commands, no reply is made." + ], + "CLIENT SETINFO": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the attribute name was successfully set." + ], + "CLIENT SETNAME": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the connection name was successfully set." + ], + "CLIENT TRACKING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the connection was successfully put in tracking mode or if the tracking mode was successfully disabled. Otherwise, an error is returned." + ], + "CLIENT TRACKINGINFO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of tracking information sections and their respective values." + ], + "CLIENT UNBLOCK": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the client was unblocked successfully.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the client wasn't unblocked." 
+ ], + "CLIENT UNPAUSE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CLUSTER": [], + "CLUSTER ADDSLOTS": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER ADDSLOTSRANGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER BUMPEPOCH": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): `BUMPED` if the epoch was incremented.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): `STILL` if the node already has the greatest configured epoch in the cluster." + ], + "CLUSTER COUNT-FAILURE-REPORTS": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of active failure reports for the node." + ], + "CLUSTER COUNTKEYSINSLOT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The number of keys in the specified hash slot, or an error if the hash slot is invalid." + ], + "CLUSTER DELSLOTS": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER DELSLOTSRANGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER FAILOVER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was accepted and a manual failover is going to be attempted. An error if the operation cannot be executed, for example if the client is connected to a node that is already a master." 
+ ], + "CLUSTER FLUSHSLOTS": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`" + ], + "CLUSTER FORGET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was executed successfully. Otherwise an error is returned." + ], + "CLUSTER GETKEYSINSLOT": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array with up to count elements." + ], + "CLUSTER HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of subcommands and their descriptions." + ], + "CLUSTER INFO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): A map between named fields and values in the form of `:` lines separated by newlines composed by the two bytes `CRLF`." + ], + "CLUSTER KEYSLOT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The hash slot number for the specified key" + ], + "CLUSTER LINKS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array of maps where each map contains various attributes and their values of a cluster link." + ], + "CLUSTER MEET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. If the address or port specified are invalid an error is returned." + ], + "CLUSTER MYID": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the node ID." + ], + "CLUSTER MYSHARDID": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the node's shard ID." + ], + "CLUSTER NODES": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the serialized cluster configuration." + ], + "CLUSTER REPLICAS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of replica nodes replicating from the specified master node provided in the same format used by `CLUSTER NODES`." + ], + "CLUSTER REPLICATE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. 
Otherwise an error is returned." + ], + "CLUSTER RESET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SAVECONFIG": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SET-CONFIG-EPOCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SETSLOT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): all the sub-commands return `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SHARDS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of a map of hash ranges and shard nodes describing individual shards." + ], + "CLUSTER SLAVES": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of replica nodes replicating from the specified master node provided in the same format used by `CLUSTER NODES`." + ], + "CLUSTER SLOTS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): nested list of slot ranges with networking information." + ], + "COMMAND": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of command details. The order of the commands in the array is random." + ], + "COMMAND COUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of commands returned by `COMMAND`." + ], + "COMMAND DOCS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a map, as a flattened array, where each key is a command name, and each value is the documentary information." + ], + "COMMAND GETKEYS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): list of keys from the given command." 
+ ], + "COMMAND GETKEYSANDFLAGS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of keys from the given command and their usage flags." + ], + "COMMAND HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "COMMAND INFO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of command details." + ], + "COMMAND LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of command names." + ], + "CONFIG": [], + "CONFIG GET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of configuration parameters matching the provided arguments." + ], + "CONFIG HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "CONFIG RESETSTAT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CONFIG REWRITE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when the configuration was rewritten properly. Otherwise an error is returned." + ], + "CONFIG SET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when the configuration was set properly. Otherwise an error is returned." + ], + "COPY": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if _source_ was copied.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _source_ was not copied." + ], + "DBSIZE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys in the currently-selected database." + ], + "DEBUG": [], + "DECR": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after decrementing it." + ], + "DECRBY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after decrementing it." 
+ ], + "DEL": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys that were removed." + ], + "DISCARD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "DUMP": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The serialized value of the key.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): the key does not exist." + ], + "ECHO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the given string." + ], + "EVAL": [ + "The return value depends on the script that was executed." + ], + "EVALSHA": [ + "The return value depends on the script that was executed." + ], + "EVALSHA_RO": [ + "The return value depends on the script that was executed." + ], + "EVAL_RO": [ + "The return value depends on the script that was executed." + ], + "EXEC": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): each element being the reply to each of the commands in the atomic transaction.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): the transaction was aborted because a `WATCH`ed key was touched." + ], + "EXISTS": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys that exist from those specified as arguments." + ], + "EXPIRE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the timeout was not set; for example, the key doesn't exist, or the operation was skipped because of the provided arguments.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set." 
+ ], + "EXPIREAT": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the timeout was not set; for example, the key doesn't exist, or the operation was skipped because of the provided arguments.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set." + ], + "EXPIRETIME": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the expiration Unix timestamp in seconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration time.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "FAILOVER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was accepted and a coordinated failover is in progress. An error if the operation cannot be executed." + ], + "FCALL": [ + "The return value depends on the function that was executed." + ], + "FCALL_RO": [ + "The return value depends on the function that was executed." + ], + "FLUSHALL": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FLUSHDB": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION": [], + "FUNCTION DELETE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION DUMP": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the serialized payload" + ], + "FUNCTION FLUSH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions" + ], + "FUNCTION KILL": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "FUNCTION LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): information about functions and libraries." + ], + "FUNCTION LOAD": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the library name that was loaded." + ], + "FUNCTION RESTORE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION STATS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): information about the function that's currently running and information about the available execution engines." + ], + "GEOADD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): When used without optional arguments, the number of elements added to the sorted set (excluding score updates). If the CH option is specified, the number of elements that were changed (added or updated)." + ], + "GEODIST": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): one or both of the elements are missing.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): distance as a double (represented as a string) in the specified units." + ], + "GEOHASH": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array where each element is the Geohash corresponding to each member name passed as an argument to the command." + ], + "GEOPOS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): An array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command. Non-existing elements are reported as [Nil reply](/docs/reference/protocol-spec#bulk-strings) elements of the array." 
+ ], + "GEORADIUS": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " 1. The distance from the center as a floating point number, in the same unit specified in the radius.", + " 1. The Geohash integer.", + " 1. The coordinates as a two items x,y array (longitude,latitude).", + "", + "For example, the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` will return each item in the following way:", + "", + "`[\"Palermo\",\"190.4424\",[\"13.361389338970184\",\"38.115556395496299\"]]`" + ], + "GEORADIUSBYMEMBER": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." + ], + "GEORADIUSBYMEMBER_RO": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." 
+ ], + "GEORADIUS_RO": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." + ], + "GEOSEARCH": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." + ], + "GEOSEARCHSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting set" + ], + "GET": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the key.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist." + ], + "GETBIT": [ + "The bit value stored at _offset_, one of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0`.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1`." 
+ ], + "GETDEL": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the key.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist or if the key's value type is not a string." + ], + "GETEX": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of `key`", + "[Nil reply](/docs/reference/protocol-spec#bulk-strings): if `key` does not exist." + ], + "GETRANGE": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The substring of the string value stored at key, determined by the offsets start and end (both are inclusive)." + ], + "GETSET": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the old value stored at the key.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist." + ], + "HDEL": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of fields that were removed from the hash, excluding any specified but non-existing fields." + ], + "HELLO": [ + "[Map reply](/docs/reference/protocol-spec#maps): a list of server properties.", + "[Simple error reply](/docs/reference/protocol-spec#simple-errors): if the `protover` requested does not exist." + ], + "HEXISTS": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the hash does not contain the field, or the key does not exist.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the hash contains the field." + ], + "HGET": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The value associated with the field.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): If the field is not present in the hash or key does not exist." 
+ ], + "HGETALL": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of fields and their values stored in the hash, or an empty list when key does not exist." + ], + "HINCRBY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the field after the increment operation." + ], + "HINCRBYFLOAT": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the field after the increment operation." + ], + "HKEYS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of fields in the hash, or an empty list when the key does not exist" + ], + "HLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of fields in the hash, or 0 when the key does not exist." + ], + "HMGET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of values associated with the given fields, in the same order as they are requested." + ], + "HMSET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "HRANDFIELD": [ + "Any of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key doesn't exist", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a single, randomly selected field when the `count` option is not used", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list containing `count` fields when the `count` option is used, or an empty array if the key does not exists.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of fields and their values when `count` and `WITHVALUES` were both used." + ], + "HSCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a two-element array.", + "* The first element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) that represents an unsigned 64-bit number, the cursor.", + "* The second element is an [Array reply](/docs/reference/protocol-spec#arrays) of field/value pairs that were scanned." 
+ ], + "HSET": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of fields that were added." + ], + "HSETNX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the field already exists in the hash and no operation was performed.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the field is a new field in the hash and the value was set." + ], + "HSTRLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the string length of the value associated with the _field_, or zero when the _field_ isn't present in the hash or the _key_ doesn't exist at all." + ], + "HVALS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of values in the hash, or an empty list when the key does not exist" + ], + "INCR": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after the increment." + ], + "INCRBY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after the increment." + ], + "INCRBYFLOAT": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the key after the increment." + ], + "INFO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a map of info fields, one field per line in the form of `:` where the value can be a comma separated map like `=`. Also contains section header lines starting with `#` and blank lines.", + "", + "Lines can contain a section name (starting with a `#` character) or a property. All the properties are in the form of `field:value` terminated by `\\r\\n`." + ], + "KEYS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of keys matching _pattern_." + ], + "LASTSAVE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): UNIX TIME of the last DB save executed with success." 
+ ], + "LATENCY": [], + "LATENCY DOCTOR": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a human readable latency analysis report." + ], + "LATENCY GRAPH": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): Latency graph" + ], + "LATENCY HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "LATENCY HISTOGRAM": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a map where each key is a command name, and each value is a map with the total calls, and an inner map of the histogram time buckets." + ], + "LATENCY HISTORY": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array where each element is a two elements array representing the timestamp and the latency of the event." + ], + "LATENCY LATEST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array where each element is a four elements array representing the event's name, timestamp, latest and all-time latency measurements." + ], + "LATENCY RESET": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of event time series that were reset." + ], + "LCS": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the longest common subsequence.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the length of the longest common subsequence when _LEN_ is given.", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array with the LCS length and all the ranges in both the strings when _IDX_ is given." + ], + "LINDEX": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when _index_ is out of range.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the requested element." 
+ ], + "LINSERT": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the list length after a successful insert operation.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` when the key doesn't exist.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` when the pivot wasn't found." + ], + "LLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list." + ], + "LMOVE": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped and pushed." + ], + "LMPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if no element could be popped.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a two-element array with the first element being the name of the key from which elements were popped and the second element being an array of elements." + ], + "LOLWUT": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a string containing generative computer art and the Redis version." + ], + "LPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): when called without the _count_ argument, the value of the first element.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when called with the _count_ argument, a list of popped elements." + ], + "LPOS": [ + "Any of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if there is no matching element.", + "* [Integer reply](/docs/reference/protocol-spec#integers): an integer representing the matching element.", + "* [Array reply](/docs/reference/protocol-spec#arrays): If the COUNT option is given, an array of integers representing the matching elements (or an empty array if there are no matches)." 
+ ], + "LPUSH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "LPUSHX": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "LRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of elements in the specified range, or an empty array if the key doesn't exist." + ], + "LREM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of removed elements." + ], + "LSET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "LTRIM": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "MEMORY": [], + "MEMORY DOCTOR": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a memory problems report." + ], + "MEMORY HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions" + ], + "MEMORY MALLOC-STATS": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the memory allocator's internal statistics report" + ], + "MEMORY PURGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "MEMORY STATS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of memory usage metrics and their values." + ], + "MEMORY USAGE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the memory usage in bytes.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist." + ], + "MGET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of values at the specified keys." + ], + "MIGRATE": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` on success.", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `NOKEY` when no keys were found in the source instance." 
+ ], + "MODULE": [], + "MODULE HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "MODULE LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): list of loaded modules. Each element in the list represents a represents a module, and is in itself a list of property names and their values. The following properties is reported for each loaded module:", + "* name: the name of the module.", + "* ver: the version of the module." + ], + "MODULE LOAD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the module was loaded." + ], + "MODULE LOADEX": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the module was loaded." + ], + "MODULE UNLOAD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the module was unloaded." + ], + "MONITOR": [ + "**Non-standard return value**. Dumps the received commands in an infinite flow." + ], + "MOVE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if _key_ was moved.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _key_ wasn't moved." + ], + "MSET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): always `OK` because `MSET` can't fail." + ], + "MSETNX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if no key was set (at least one key already existed).", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if all the keys were set." + ], + "MULTI": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "OBJECT": [], + "OBJECT ENCODING": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key doesn't exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the encoding of the object." 
+ ], + "OBJECT FREQ": [ + "One of the following:", + "[Integer reply](/docs/reference/protocol-spec#integers): the counter's value.", + "[Nil reply](/docs/reference/protocol-spec#bulk-strings): if _key_ doesn't exist." + ], + "OBJECT HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions" + ], + "OBJECT IDLETIME": [ + "One of the following:", + "[Integer reply](/docs/reference/protocol-spec#integers): the idle time in seconds.", + "[Nil reply](/docs/reference/protocol-spec#bulk-strings): if _key_ doesn't exist." + ], + "OBJECT REFCOUNT": [ + "One of the following:", + "[Integer reply](/docs/reference/protocol-spec#integers): the number of references.", + "[Nil reply](/docs/reference/protocol-spec#bulk-strings): if _key_ doesn't exist." + ], + "PERSIST": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _key_ does not exist or does not have an associated timeout.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout has been removed." + ], + "PEXPIRE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0`if the timeout was not set. For example, if the key doesn't exist, or the operation skipped because of the provided arguments.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set." + ], + "PEXPIREAT": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the timeout was not set. For example, if the key doesn't exist, or the operation was skipped due to the provided arguments." 
+ ], + "PEXPIRETIME": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): Expiration Unix timestamp in milliseconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration time.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "PFADD": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if at least one HyperLogLog internal register was altered.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if no HyperLogLog internal registers were altered." + ], + "PFCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the approximated number of unique elements observed via `PFADD`." + ], + "PFDEBUG": [], + "PFMERGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "PFSELFTEST": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "PING": [ + "Any of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `PONG` when no argument is provided.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the provided argument." + ], + "PSETEX": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "PSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each pattern, one message with the first element being the string `psubscribe` is pushed as a confirmation that the command succeeded." + ], + "PSYNC": [ + "**Non-standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master." 
+ ], + "PTTL": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): TTL in milliseconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "PUBLISH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of clients that received the message. Note that in a Redis Cluster, only clients that are connected to the same node as the publishing client are included in the count." + ], + "PUBSUB": [], + "PUBSUB CHANNELS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of active channels, optionally matching the specified pattern." + ], + "PUBSUB HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "PUBSUB NUMPAT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of patterns all the clients are subscribed to." + ], + "PUBSUB NUMSUB": [ + "[Array reply](/docs/reference/protocol-spec#arrays): the number of subscribers per channel, each even element (including the 0th) is channel name, each odd element is the number of subscribers" + ], + "PUBSUB SHARDCHANNELS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of active channels, optionally matching the specified pattern." + ], + "PUBSUB SHARDNUMSUB": [ + "[Array reply](/docs/reference/protocol-spec#arrays): the number of subscribers per shard channel, each even element (including the 0th) is channel name, each odd element is the number of subscribers." + ], + "PUNSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each pattern, one message with the first element being the string `punsubscribe` is pushed as a confirmation that the command succeeded." + ], + "QUIT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): OK." 
+ ], + "RANDOMKEY": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when the database is empty.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a random key in database." + ], + "READONLY": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "READWRITE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RENAME": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RENAMENX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if _key_ was renamed to _newkey_.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _newkey_ already exists." + ], + "REPLCONF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "REPLICAOF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RESET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `RESET`." + ], + "RESTORE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RESTORE-ASKING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "ROLE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): where the first element is one of `master`, `slave`, or `sentinel`, and the additional elements are role-specific as illustrated above." + ], + "RPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): when called without the _count_ argument, the value of the last element.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when called with the _count_ argument, a list of popped elements." 
+ ], + "RPOPLPUSH": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped and pushed.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the source list is empty." + ], + "RPUSH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "RPUSHX": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "SADD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements that were added to the set, not including all the elements already present in the set." + ], + "SAVE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): specifically, an array with two elements.", + "* The first element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) that represents an unsigned 64-bit number, the cursor.", + "* The second element is an [Array reply](/docs/reference/protocol-spec#arrays) with the names of scanned keys." + ], + "SCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the cardinality (number of elements) of the set, or `0` if the key does not exist." + ], + "SCRIPT": [], + "SCRIPT DEBUG": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SCRIPT EXISTS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array of integers that correspond to the specified SHA1 digest arguments." + ], + "SCRIPT FLUSH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SCRIPT HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "SCRIPT KILL": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "SCRIPT LOAD": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the SHA1 digest of the script added into the script cache." + ], + "SDIFF": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list with members of the resulting set." + ], + "SDIFFSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting set." + ], + "SELECT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SET": [ + "Any of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): `GET` not given: Operation was aborted (conflict with one of the `XX`/`NX` options).", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. `GET` not given: The key was set.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): `GET` given: The key didn't exist before the `SET`.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): `GET` given: The previous value of the key." + ], + "SETBIT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the original bit value stored at _offset_." + ], + "SETEX": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SETNX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the key was not set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the key was set." + ], + "SETRANGE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the string after it was modified by the command." + ], + "SHUTDOWN": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if _ABORT_ was specified and shutdown was aborted. On successful shutdown, nothing is returned because the server quits and the connection is closed. On failure, an error is returned." 
+ ], + "SINTER": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list with the members of the resulting set." + ], + "SINTERCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting intersection." + ], + "SINTERSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting set." + ], + "SISMEMBER": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the element is not a member of the set, or when the key does not exist.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the element is a member of the set." + ], + "SLAVEOF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SLOWLOG": [], + "SLOWLOG GET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of slow log entries per the above format." + ], + "SLOWLOG HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "SLOWLOG LEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of entries in the slow log." + ], + "SLOWLOG RESET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SMEMBERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): all members of the set." + ], + "SMISMEMBER": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list representing the membership of the given elements, in the same order as they are requested." + ], + "SMOVE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the element is moved.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the element is not a member of _source_ and no operation was performed." 
+ ], + "SORT": [ + "[Array reply](/docs/reference/protocol-spec#arrays): without passing the _STORE_ option, the command returns a list of sorted elements.", + "[Integer reply](/docs/reference/protocol-spec#integers): when the _STORE_ option is specified, the command returns the number of sorted elements in the destination list." + ], + "SORT_RO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sorted elements." + ], + "SPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): when called without the _count_ argument, the removed member.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when called with the _count_ argument, a list of the removed members." + ], + "SPUBLISH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of clients that received the message. Note that in a Redis Cluster, only clients that are connected to the same node as the publishing client are included in the count." + ], + "SRANDMEMBER": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): without the additional _count_ argument, the command returns a randomly selected member, or a [Nil reply](/docs/reference/protocol-spec#bulk-strings) when _key_ doesn't exist.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when the optional _count_ argument is passed, the command returns an array of members, or an empty array when _key_ doesn't exist." + ], + "SREM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members that were removed from the set, not including non-existing members."
+ ], + "SSCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): specifically, an array with two elements:", + "* The first element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) that represents an unsigned 64-bit number, the cursor.", + "* The second element is an [Array reply](/docs/reference/protocol-spec#arrays) with the names of scanned members." + ], + "SSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each shard channel, one message with the first element being the string `ssubscribe` is pushed as a confirmation that the command succeeded. Note that this command can also return a -MOVED redirect." + ], + "STRLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the string stored at key, or 0 when the key does not exist." + ], + "SUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each channel, one message with the first element being the string `subscribe` is pushed as a confirmation that the command succeeded." + ], + "SUBSTR": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the substring of the string value stored at key, determined by the offsets start and end (both are inclusive)." + ], + "SUNION": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list with members of the resulting set." + ], + "SUNIONSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting set." + ], + "SUNSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each shard channel, one message with the first element being the string `sunsubscribe` is pushed as a confirmation that the command succeeded." + ], + "SWAPDB": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SYNC": [ + "**Non-standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master." 
+ ], + "TIME": [ + "[Array reply](/docs/reference/protocol-spec#arrays): specifically, a two-element array consisting of the Unix timestamp in seconds and the microseconds' count." + ], + "TOUCH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of touched keys." + ], + "TTL": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): TTL in seconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "TYPE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): the type of _key_, or `none` when _key_ doesn't exist." + ], + "UNLINK": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys that were unlinked." + ], + "UNSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each channel, one message with the first element being the string `unsubscribe` is pushed as a confirmation that the command succeeded." + ], + "UNWATCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "WAIT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the command returns the number of replicas reached by all the writes performed in the context of the current connection." + ], + "WAITAOF": [ + "[Array reply](/docs/reference/protocol-spec#arrays): The command returns an array of two integers:", + "1. The first is the number of local Redises (0 or 1) that have fsynced to AOF all writes performed in the context of the current connection", + "2. The second is the number of replicas that have acknowledged doing the same." + ], + "WATCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "XACK": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The command returns the number of messages successfully acknowledged. Certain message IDs may no longer be part of the PEL (for example because they have already been acknowledged), and XACK will not count them as successfully acknowledged." + ], + "XADD": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The ID of the added entry. The ID is the one automatically generated if an asterisk (`*`) is passed as the _id_ argument, otherwise the command just returns the same ID specified by the user during insertion.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the NOMKSTREAM option is given and the key doesn't exist." + ], + "XAUTOCLAIM": [ + "[Array reply](/docs/reference/protocol-spec#arrays), specifically, an array with three elements:", + "1. A stream ID to be used as the _start_ argument for the next call to XAUTOCLAIM.", + "2. An [Array reply](/docs/reference/protocol-spec#arrays) containing all the successfully claimed messages in the same format as `XRANGE`.", + "3. An [Array reply](/docs/reference/protocol-spec#arrays) containing message IDs that no longer exist in the stream, and were deleted from the PEL in which they were found." + ], + "XCLAIM": [ + "Any of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): when the _JUSTID_ option is specified, an array of IDs of messages successfully claimed.", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of stream entries, each of which contains an array of two elements, the entry ID and the entry data itself." + ], + "XDEL": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of entries that were deleted." + ], + "XGROUP": [], + "XGROUP CREATE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "XGROUP CREATECONSUMER": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of created consumers, either 0 or 1." + ], + "XGROUP DELCONSUMER": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of pending messages the consumer had before it was deleted." + ], + "XGROUP DESTROY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of destroyed consumer groups, either 0 or 1." + ], + "XGROUP HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "XGROUP SETID": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "XINFO": [], + "XINFO CONSUMERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of consumers and their attributes." + ], + "XINFO GROUPS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of consumer groups." + ], + "XINFO HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "XINFO STREAM": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): when the _FULL_ argument is not used, a list of information about a stream in summary form.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when the _FULL_ argument is used, a list of information about a stream in extended form." + ], + "XLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of entries of the stream at _key_." + ], + "XPENDING": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): different data depending on the way XPENDING is called, as explained on this page." + ], + "XRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of stream entries with IDs matching the specified range."
+ ], + "XREAD": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array where each element is an array composed of two elements containing the key name and the entries reported for that key. The entries reported are full stream entries, having IDs and the list of all the fields and values. Field and values are guaranteed to be reported in the same order they were added by `XADD`.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the _BLOCK_ option is given and a timeout occurs, or if there is no stream that can be served." + ], + "XREADGROUP": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array where each element is an array composed of two elements containing the key name and the entries reported for that key. The entries reported are full stream entries, having IDs and the list of all the fields and values. Field and values are guaranteed to be reported in the same order they were added by `XADD`.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the _BLOCK_ option is given and a timeout occurs, or if there is no stream that can be served." + ], + "XREVRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): The command returns the entries with IDs matching the specified range. The returned entries are complete, which means that the ID and all the fields they are composed of are returned. Moreover, the entries are returned with their fields and values in the same order as `XADD` added them." + ], + "XSETID": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "XTRIM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The number of entries deleted from the stream."
+ ], + "ZADD": [ + "Any of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the operation was aborted because of a conflict with one of the _XX/NX/LT/GT_ options.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the number of new members when the _CH_ option is not used.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the number of new or updated members when the _CH_ option is used.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the updated score of the member when the _INCR_ option is used." + ], + "ZCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the cardinality (number of members) of the sorted set, or 0 if the key doesn't exist." + ], + "ZCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the specified score range." + ], + "ZDIFF": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): the result of the difference including, optionally, scores when the _WITHSCORES_ option is used." + ], + "ZDIFFSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the resulting sorted set at _destination_." + ], + "ZINCRBY": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the new score of _member_ as a double precision floating point number." + ], + "ZINTER": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): the result of the intersection including, optionally, scores when the _WITHSCORES_ option is used." + ], + "ZINTERCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the resulting intersection." + ], + "ZINTERSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the resulting sorted set at the _destination_." + ], + "ZLEXCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the specified score range." 
+ ], + "ZMPOP": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): when no element could be popped.", + "* [Array reply](/docs/reference/protocol-spec#arrays): A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score." + ], + "ZMSCORE": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the member does not exist in the sorted set.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) _member_ scores as double-precision floating point numbers." + ], + "ZPOPMAX": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of popped elements and scores." + ], + "ZPOPMIN": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of popped elements and scores." + ], + "ZRANDMEMBER": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): without the additional _count_ argument, the command returns a randomly selected member, or [Nil reply](/docs/reference/protocol-spec#bulk-strings) when _key_ doesn't exist.", + "[Array reply](/docs/reference/protocol-spec#arrays): when the additional _count_ argument is passed, the command returns an array of members, or an empty array when _key_ doesn't exist. If the _WITHSCORES_ modifier is used, the reply is a list of members and their scores from the sorted set." + ], + "ZRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of members in the specified range with, optionally, their scores when the _WITHSCORES_ option is given." + ], + "ZRANGEBYLEX": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of elements in the specified score range." 
+ ], + "ZRANGEBYSCORE": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of the members with, optionally, their scores in the specified score range." + ], + "ZRANGESTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting sorted set." + ], + "ZRANK": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist or the member does not exist in the sorted set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the rank of the member when _WITHSCORE_ is not used.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the rank and score of the member when _WITHSCORE_ is used." + ], + "ZREM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members removed from the sorted set, not including non-existing members." + ], + "ZREMRANGEBYLEX": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members removed." + ], + "ZREMRANGEBYRANK": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members removed." + ], + "ZREMRANGEBYSCORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members removed." + ], + "ZREVRANGE": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of members in the specified range, optionally with their scores if _WITHSCORE_ was used." + ], + "ZREVRANGEBYLEX": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of members in the specified score range." + ], + "ZREVRANGEBYSCORE": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of the members and, optionally, their scores in the specified score range." 
+ ], + "ZREVRANK": [ + "One of the following:", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if the key does not exist or the member does not exist in the sorted set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): The rank of the member when _WITHSCORE_ is not used.", + "* [Array reply](/docs/reference/protocol-spec#arrays): The rank and score of the member when _WITHSCORE_ is used." + ], + "ZSCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): cursor and scan response in array form." + ], + "ZSCORE": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the score of the member (a double-precision floating point number), represented as a string.", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if _member_ does not exist in the sorted set, or the key does not exist." + ], + "ZUNION": [ + "[Array reply](/docs/reference/protocol-spec#arrays): the result of the union with, optionally, their scores when _WITHSCORES_ is used." + ], + "ZUNIONSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting sorted set." + ] +} diff --git a/resp3_replies.json b/resp3_replies.json new file mode 100644 index 0000000000..fa7da38383 --- /dev/null +++ b/resp3_replies.json @@ -0,0 +1,1385 @@ +{ + "ACL": [], + "ACL CAT": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements representing ACL categories or commands in a given category.", + "* [Simple error reply](/docs/reference/protocol-spec#simple-errors): the command returns an error if an invalid category name is given." + ], + "ACL DELUSER": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of users that were deleted. This number will not always match the number of arguments since certain users may not exist." 
+ ], + "ACL DRYRUN": [ + "Any of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` on success.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): an error describing why the user can't execute the command." + ], + "ACL GENPASS": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): pseudorandom data. By default it contains 64 bytes, representing 256 bits of data. If `bits` was given, the output string length is the number of specified bits (rounded to the next multiple of 4) divided by 4." + ], + "ACL GETUSER": [ + "One of the following:", + "* [Map reply](/docs/reference/protocol-spec#maps): a set of ACL rule definitions for the user", + "* [Null reply](/docs/reference/protocol-spec#nulls): if user does not exist." + ], + "ACL HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of subcommands and their descriptions." + ], + "ACL LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements." + ], + "ACL LOAD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` on success.", + "", + "The command may fail with an error for several reasons: if the file is not readable, if there is an error inside the file, and in such cases, the error will be reported to the user in the error.", + "Finally, the command will fail if the server is not configured to use an external ACL file." + ], + "ACL LOG": [ + "When called to show security events:", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) elements representing ACL security events.", + "When called with `RESET`:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the security log was cleared." 
+ ], + "ACL SAVE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`.", + "The command may fail with an error for several reasons: if the file cannot be written or if the server is not configured to use an external ACL file." + ], + "ACL SETUSER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`.", + "If the rules contain errors, the error is returned." + ], + "ACL USERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): list of existing ACL users." + ], + "ACL WHOAMI": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the username of the current connection." + ], + "APPEND": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the string after the append operation." + ], + "ASKING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "AUTH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`, or an error if the password, or username/password pair, is invalid." + ], + "BGREWRITEAOF": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a simple string reply indicating that the rewriting started or is about to start ASAP when the call is executed with success.", + "", + "The command may reply with an error in certain cases, as documented above." + ], + "BGSAVE": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `Background saving started`.", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `Background saving scheduled`." + ], + "BITCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of bits set to 1." 
+ ], + "BITFIELD": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): each entry being the corresponding result of the sub-command given at the same position.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if OVERFLOW FAIL was given and overflows or underflows are detected." + ], + "BITFIELD_RO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): each entry being the corresponding result of the sub-command given at the same position." + ], + "BITOP": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the size of the string stored in the destination key is equal to the size of the longest input string." + ], + "BITPOS": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the position of the first bit set to 1 or 0 according to the request", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1`. In case the `bit` argument is 1 and the string is empty or composed of just zero bytes", + "", + "If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned.", + "", + "If we look for clear bits (the bit argument is 0) and the string only contains bits set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value `0xff` the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1.", + "", + "The function considers the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**.", + "", + "However, this behavior changes if you are looking for clear bits and specify a range with both _start_ and _end_.", + "If a clear bit isn't found in the specified range, the function returns -1 as the user specified a clear range and there are no 0 bits in that range." 
+ ], + "BLMOVE": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped from the _source_ and pushed to the _destination_.", + "* [Null reply](/docs/reference/protocol-spec#nulls): the operation timed out." + ], + "BLMPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when no element could be popped and the _timeout_ is reached.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a two-element array with the first element being the name of the key from which elements were popped, and the second element being an array of the popped elements." + ], + "BLPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): no element could be popped and the timeout expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the key from which the element was popped and the value of the popped element." + ], + "BRPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): no element could be popped and the timeout expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the key from which the element was popped and the value of the popped element." + ], + "BRPOPLPUSH": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped from _source_ and pushed to _destination_.", + "* [Null reply](/docs/reference/protocol-spec#nulls): the timeout is reached." + ], + "BZMPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when no element could be popped.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score."
+ ], + "BZPOPMAX": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when no element could be popped and the _timeout_ expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the keyname, popped member, and its score." + ], + "BZPOPMIN": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when no element could be popped and the _timeout_ expired.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the keyname, popped member, and its score." + ], + "CLIENT": [], + "CLIENT CACHING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` or an error if the argument is not \"yes\" or \"no\"." + ], + "CLIENT GETNAME": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the connection name of the current connection.", + "* [Null reply](/docs/reference/protocol-spec#nulls): the connection name was not set." + ], + "CLIENT GETREDIR": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` when not redirecting notifications to any client.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if client tracking is not enabled.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the ID of the client to which notifications are being redirected." + ], + "CLIENT HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of subcommands and their descriptions." + ], + "CLIENT ID": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the ID of the client." + ], + "CLIENT INFO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a unique string for the current client, as described at the `CLIENT LIST` page."
+ ], + "CLIENT KILL": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when called in 3 argument format and the connection has been closed.", + "* [Integer reply](/docs/reference/protocol-spec#integers): when called in filter/value format, the number of clients killed." + ], + "CLIENT LIST": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): information and statistics about client connections." + ], + "CLIENT NO-EVICT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CLIENT NO-TOUCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CLIENT PAUSE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` or an error if the timeout is invalid." + ], + "CLIENT REPLY": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when called with `ON`. When called with either `OFF` or `SKIP` sub-commands, no reply is made." + ], + "CLIENT SETINFO": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the attribute name was successfully set." + ], + "CLIENT SETNAME": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the connection name was successfully set." + ], + "CLIENT TRACKING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the connection was successfully put in tracking mode or if the tracking mode was successfully disabled. Otherwise, an error is returned." + ], + "CLIENT TRACKINGINFO": [ + "[Map reply](/docs/reference/protocol-spec#maps): a list of tracking information sections and their respective values." + ], + "CLIENT UNBLOCK": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the client was unblocked successfully.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the client wasn't unblocked." 
+ ], + "CLIENT UNPAUSE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CLUSTER": [], + "CLUSTER ADDSLOTS": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER ADDSLOTSRANGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER BUMPEPOCH": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): `BUMPED` if the epoch was incremented.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): `STILL` if the node already has the greatest configured epoch in the cluster." + ], + "CLUSTER COUNT-FAILURE-REPORTS": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of active failure reports for the node." + ], + "CLUSTER COUNTKEYSINSLOT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The number of keys in the specified hash slot, or an error if the hash slot is invalid." + ], + "CLUSTER DELSLOTS": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER DELSLOTSRANGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER FAILOVER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was accepted and a manual failover is going to be attempted. An error if the operation cannot be executed, for example if the client is connected to a node that is already a master." + ], + "CLUSTER FLUSHSLOTS": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "CLUSTER FORGET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was executed successfully. Otherwise an error is returned." + ], + "CLUSTER GETKEYSINSLOT": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array with up to count elements." + ], + "CLUSTER HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of subcommands and their descriptions." + ], + "CLUSTER INFO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): A map between named fields and values in the form of `<field>:<value>` lines separated by newlines composed by the two bytes `CRLF`." + ], + "CLUSTER KEYSLOT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The hash slot number for the specified key." + ], + "CLUSTER LINKS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array of [Map reply](/docs/reference/protocol-spec#maps) where each map contains various attributes and their values of a cluster link." + ], + "CLUSTER MEET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. If the address or port specified are invalid an error is returned." + ], + "CLUSTER MYID": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the node ID." + ], + "CLUSTER MYSHARDID": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the node's shard ID." + ], + "CLUSTER NODES": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the serialized cluster configuration." + ], + "CLUSTER REPLICAS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of replica nodes replicating from the specified master node provided in the same format used by `CLUSTER NODES`." + ], + "CLUSTER REPLICATE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned."
+ ], + "CLUSTER RESET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SAVECONFIG": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SET-CONFIG-EPOCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SETSLOT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): all the sub-commands return `OK` if the command was successful. Otherwise an error is returned." + ], + "CLUSTER SHARDS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of [Map reply](/docs/reference/protocol-spec#maps) of hash ranges and shard nodes describing individual shards." + ], + "CLUSTER SLAVES": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of replica nodes replicating from the specified master node provided in the same format used by `CLUSTER NODES`." + ], + "CLUSTER SLOTS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): nested list of slot ranges with networking information." + ], + "COMMAND": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of command details. The order of the commands in the array is random." + ], + "COMMAND COUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of commands returned by `COMMAND`." + ], + "COMMAND DOCS": [ + "[Map reply](/docs/reference/protocol-spec#maps): a map where each key is a command name, and each value is the documentary information." + ], + "COMMAND GETKEYS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of keys from the given command." 
+ ], + "COMMAND GETKEYSANDFLAGS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of keys from the given command and their usage flags." + ], + "COMMAND HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "COMMAND INFO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a nested list of command details." + ], + "COMMAND LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of command names." + ], + "CONFIG": [], + "CONFIG GET": [ + "[Map reply](/docs/reference/protocol-spec#maps): a list of configuration parameters matching the provided arguments." + ], + "CONFIG HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "CONFIG RESETSTAT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "CONFIG REWRITE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when the configuration was rewritten properly. Otherwise an error is returned." + ], + "CONFIG SET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` when the configuration was set properly. Otherwise an error is returned." + ], + "COPY": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if _source_ was copied.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _source_ was not copied." + ], + "DBSIZE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys in the currently-selected database." + ], + "DEBUG": [], + "DECR": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after decrementing it." + ], + "DECRBY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after decrementing it." 
+ ], + "DEL": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys that were removed." + ], + "DISCARD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "DUMP": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the serialized value of the key.", + "* [Null reply](/docs/reference/protocol-spec#nulls): the key does not exist." + ], + "ECHO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the given string." + ], + "EVAL": [ + "The return value depends on the script that was executed." + ], + "EVALSHA": [ + "The return value depends on the script that was executed." + ], + "EVALSHA_RO": [ + "The return value depends on the script that was executed." + ], + "EVAL_RO": [ + "The return value depends on the script that was executed." + ], + "EXEC": [ + "One of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): each element being the reply to each of the commands in the atomic transaction.", + "* [Null reply](/docs/reference/protocol-spec#nulls): the transaction was aborted because a `WATCH`ed key was touched." + ], + "EXISTS": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys that exist from those specified as arguments." + ], + "EXPIRE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the timeout was not set; for example, the key doesn't exist, or the operation was skipped because of the provided arguments.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set." + ], + "EXPIREAT": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the timeout was not set; for example, the key doesn't exist, or the operation was skipped because of the provided arguments.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set." 
+ ], + "EXPIRETIME": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the expiration Unix timestamp in seconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration time.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "FAILOVER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the command was accepted and a coordinated failover is in progress. An error if the operation cannot be executed." + ], + "FCALL": [ + "The return value depends on the function that was executed." + ], + "FCALL_RO": [ + "The return value depends on the function that was executed." + ], + "FLUSHALL": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FLUSHDB": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION": [], + "FUNCTION DELETE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION DUMP": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the serialized payload" + ], + "FUNCTION FLUSH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "FUNCTION KILL": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "FUNCTION LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): information about functions and libraries." + ], + "FUNCTION LOAD": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the library name that was loaded." + ], + "FUNCTION RESTORE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "FUNCTION STATS": [ + "[Map reply](/docs/reference/protocol-spec#maps): information about the function that's currently running and information about the available execution engines." + ], + "GEOADD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): When used without optional arguments, the number of elements added to the sorted set (excluding score updates). If the CH option is specified, the number of elements that were changed (added or updated)." + ], + "GEODIST": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): one or both of the elements are missing.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): distance as a double (represented as a string) in the specified units." + ], + "GEOHASH": [ + "[Array reply](/docs/reference/protocol-spec#arrays): An array where each element is the Geohash corresponding to each member name passed as an argument to the command." + ], + "GEOPOS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): An array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command. Non-existing elements are reported as [Null reply](/docs/reference/protocol-spec#nulls) elements of the array." + ], + "GEORADIUS": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " 1. The distance from the center as a floating point number, in the same unit specified in the radius.", + " 1. The Geohash integer.", + " 1. 
The coordinates as a two items x,y array (longitude,latitude).", + "", + "For example, the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` will return each item in the following way:", + "", + "`[\"Palermo\",\"190.4424\",[\"13.361389338970184\",\"38.115556395496299\"]]`" + ], + "GEORADIUSBYMEMBER": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." + ], + "GEORADIUSBYMEMBER_RO": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." 
+ ], + "GEORADIUS_RO": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." + ], + "GEOSEARCH": [ + "One of the following:", + "* If no `WITH*` option is specified, an [Array reply](/docs/reference/protocol-spec#arrays) of matched member names", + "* If `WITHCOORD`, `WITHDIST`, or `WITHHASH` options are specified, the command returns an [Array reply](/docs/reference/protocol-spec#arrays) of arrays, where each sub-array represents a single item:", + " * The distance from the center as a floating point number, in the same unit specified in the radius.", + " * The Geohash integer.", + " * The coordinates as a two items x,y array (longitude,latitude)." + ], + "GEOSEARCHSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting set" + ], + "GET": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the key.", + "* [Null reply](/docs/reference/protocol-spec#nulls): key does not exist." + ], + "GETBIT": [ + "The bit value stored at _offset_, one of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0`.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1`." + ], + "GETDEL": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the key.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist or if the key's value type is not a string." 
+ ], + "GETEX": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of `key`", + "[Null reply](/docs/reference/protocol-spec#nulls): if `key` does not exist." + ], + "GETRANGE": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The substring of the string value stored at key, determined by the offsets start and end (both are inclusive)." + ], + "GETSET": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the old value stored at the key.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist." + ], + "HDEL": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The number of fields that were removed from the hash, excluding any specified but non-existing fields." + ], + "HELLO": [ + "[Map reply](/docs/reference/protocol-spec#maps): a list of server properties.", + "[Simple error reply](/docs/reference/protocol-spec#simple-errors): if the `protover` requested does not exist." + ], + "HEXISTS": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the hash does not contain the field, or the key does not exist.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the hash contains the field." + ], + "HGET": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The value associated with the field.", + "* [Null reply](/docs/reference/protocol-spec#nulls): If the field is not present in the hash or key does not exist." + ], + "HGETALL": [ + "[Map reply](/docs/reference/protocol-spec#maps): a map of fields and their values stored in the hash, or an empty list when key does not exist." + ], + "HINCRBY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the field after the increment operation." 
+ ], + "HINCRBYFLOAT": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The value of the field after the increment operation." + ], + "HKEYS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of fields in the hash, or an empty list when the key does not exist." + ], + "HLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of the fields in the hash, or 0 when the key does not exist." + ], + "HMGET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of values associated with the given fields, in the same order as they are requested." + ], + "HMSET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "HRANDFIELD": [ + "Any of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key doesn't exist", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a single, randomly selected field when the `count` option is not used", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list containing `count` fields when the `count` option is used, or an empty array if the key does not exists.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of fields and their values when `count` and `WITHVALUES` were both used." + ], + "HSCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a two-element array.", + "* The first element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) that represents an unsigned 64-bit number, the cursor.", + "* The second element is an [Array reply](/docs/reference/protocol-spec#arrays) of field/value pairs that were scanned." + ], + "HSET": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of fields that were added." 
+ ], + "HSETNX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the field already exists in the hash and no operation was performed.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the field is a new field in the hash and the value was set." + ], + "HSTRLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the string length of the value associated with the _field_, or zero when the _field_ isn't present in the hash or the _key_ doesn't exist at all." + ], + "HVALS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of values in the hash, or an empty list when the key does not exist." + ], + "INCR": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after the increment." + ], + "INCRBY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the value of the key after the increment." + ], + "INCRBYFLOAT": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the value of the key after the increment." + ], + "INFO": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a map of info fields, one field per line in the form of `:` where the value can be a comma separated map like `=`. Also contains section header lines starting with `#` and blank lines.", + "", + "Lines can contain a section name (starting with a `#` character) or a property. All the properties are in the form of `field:value` terminated by `\\r\\n`." + ], + "KEYS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of keys matching _pattern_." + ], + "LASTSAVE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): UNIX TIME of the last DB save executed with success." + ], + "LATENCY": [], + "LATENCY DOCTOR": [ + "[Verbatim string reply](/docs/reference/protocol-spec#verbatim-strings): a human readable latency analysis report." 
+ ], + "LATENCY GRAPH": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): Latency graph" + ], + "LATENCY HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "LATENCY HISTOGRAM": [ + "[Map reply](/docs/reference/protocol-spec#maps): a map where each key is a command name, and each value is a map with the total calls, and an inner map of the histogram time buckets." + ], + "LATENCY HISTORY": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array where each element is a two elements array representing the timestamp and the latency of the event." + ], + "LATENCY LATEST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array where each element is a four elements array representing the event's name, timestamp, latest and all-time latency measurements." + ], + "LATENCY RESET": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of event time series that were reset." + ], + "LCS": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the longest common subsequence.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the length of the longest common subsequence when _LEN_ is given.", + "* [Map reply](/docs/reference/protocol-spec#maps): a map with the LCS length and all the ranges in both the strings when _IDX_ is given." + ], + "LINDEX": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when _index_ is out of range.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the requested element." 
+ ], + "LINSERT": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the list length after a successful insert operation.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` when the key doesn't exist.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` when the pivot wasn't found." + ], + "LLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list." + ], + "LMOVE": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped and pushed." + ], + "LMPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if no element could be popped.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a two-element array with the first element being the name of the key from which elements were popped and the second element being an array of elements." + ], + "LOLWUT": [ + "[Verbatim string reply](/docs/reference/protocol-spec#verbatim-strings): a string containing generative computer art and the Redis version." + ], + "LPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): when called without the _count_ argument, the value of the first element.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when called with the _count_ argument, a list of popped elements." + ], + "LPOS": [ + "Any of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if there is no matching element.", + "* [Integer reply](/docs/reference/protocol-spec#integers): an integer representing the matching element.", + "* [Array reply](/docs/reference/protocol-spec#arrays): If the COUNT option is given, an array of integers representing the matching elements (or an empty array if there are no matches)." 
+ ], + "LPUSH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "LPUSHX": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "LRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of elements in the specified range, or an empty array if the key doesn't exist." + ], + "LREM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of removed elements." + ], + "LSET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "LTRIM": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "MEMORY": [], + "MEMORY DOCTOR": [ + "[Verbatim string reply](/docs/reference/protocol-spec#verbatim-strings): a memory problems report." + ], + "MEMORY HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "MEMORY MALLOC-STATS": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The memory allocator's internal statistics report." + ], + "MEMORY PURGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "MEMORY STATS": [ + "[Map reply](/docs/reference/protocol-spec#maps): memory usage metrics and their values." + ], + "MEMORY USAGE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): the memory usage in bytes.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist." + ], + "MGET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of values at the specified keys." + ], + "MIGRATE": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` on success.", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `NOKEY` when no keys were found in the source instance." 
+ ], + "MODULE": [], + "MODULE HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions" + ], + "MODULE LIST": [ + "[Array reply](/docs/reference/protocol-spec#arrays): list of loaded modules. Each element in the list represents a represents a module, and is a [Map reply](/docs/reference/protocol-spec#maps) of property names and their values. The following properties is reported for each loaded module:", + "* name: the name of the module.", + "* ver: the version of the module." + ], + "MODULE LOAD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the module was loaded." + ], + "MODULE LOADEX": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the module was loaded." + ], + "MODULE UNLOAD": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if the module was unloaded." + ], + "MONITOR": [ + "**Non-standard return value**. Dumps the received commands in an infinite flow." + ], + "MOVE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if _key_ was moved.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _key_ wasn't moved." + ], + "MSET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): always `OK` because `MSET` can't fail." + ], + "MSETNX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if no key was set (at least one key already existed).", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if all the keys were set." + ], + "MULTI": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "OBJECT": [], + "OBJECT ENCODING": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key doesn't exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the encoding of the object." 
+ ], + "OBJECT FREQ": [ + "One of the following:", + "[Integer reply](/docs/reference/protocol-spec#integers): the counter's value.", + "[Null reply](/docs/reference/protocol-spec#nulls): if _key_ doesn't exist." + ], + "OBJECT HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "OBJECT IDLETIME": [ + "One of the following:", + "[Integer reply](/docs/reference/protocol-spec#integers): the idle time in seconds.", + "[Null reply](/docs/reference/protocol-spec#nulls): if _key_ doesn't exist." + ], + "OBJECT REFCOUNT": [ + "One of the following:", + "[Integer reply](/docs/reference/protocol-spec#integers): the number of references.", + "[Null reply](/docs/reference/protocol-spec#nulls): if _key_ doesn't exist." + ], + "PERSIST": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _key_ does not exist or does not have an associated timeout.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout has been removed." + ], + "PEXPIRE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0`if the timeout was not set. For example, if the key doesn't exist, or the operation skipped because of the provided arguments.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set." + ], + "PEXPIREAT": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the timeout was set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the timeout was not set. For example, if the key doesn't exist, or the operation was skipped due to the provided arguments." 
+ ], + "PEXPIRETIME": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): Expiration Unix timestamp in milliseconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration time.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "PFADD": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if at least one HyperLogLog internal register was altered.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if no HyperLogLog internal registers were altered." + ], + "PFCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the approximated number of unique elements observed via `PFADD`" + ], + "PFDEBUG": [], + "PFMERGE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "PFSELFTEST": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "PING": [ + "Any of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `PONG` when no argument is provided.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the provided argument." + ], + "PSETEX": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "PSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each pattern, one message with the first element being the string `psubscribe` is pushed as a confirmation that the command succeeded." + ], + "PSYNC": [ + "**Non-standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master." 
+ ], + "PTTL": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): TTL in milliseconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." + ], + "PUBLISH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of clients that received the message. Note that in a Redis Cluster, only clients that are connected to the same node as the publishing client are included in the count." + ], + "PUBSUB": [], + "PUBSUB CHANNELS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of active channels, optionally matching the specified pattern." + ], + "PUBSUB HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "PUBSUB NUMPAT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of patterns all the clients are subscribed to." + ], + "PUBSUB NUMSUB": [ + "[Array reply](/docs/reference/protocol-spec#arrays): the number of subscribers per channel, each even element (including the 0th) is channel name, each odd element is the number of subscribers" + ], + "PUBSUB SHARDCHANNELS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of active channels, optionally matching the specified pattern." + ], + "PUBSUB SHARDNUMSUB": [ + "[Array reply](/docs/reference/protocol-spec#arrays): the number of subscribers per shard channel, each even element (including the 0th) is channel name, each odd element is the number of subscribers." + ], + "PUNSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each pattern, one message with the first element being the string `punsubscribe` is pushed as a confirmation that the command succeeded." + ], + "QUIT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "RANDOMKEY": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when the database is empty.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a random key in the database." + ], + "READONLY": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "READWRITE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RENAME": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RENAMENX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if _key_ was renamed to _newkey_.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if _newkey_ already exists." + ], + "REPLCONF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "REPLICAOF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RESET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `RESET`." + ], + "RESTORE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "RESTORE-ASKING": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "ROLE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): where the first element is one of `master`, `slave`, or `sentinel`, and the additional elements are role-specific as illustrated above." + ], + "RPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): when called without the _count_ argument, the value of the last element.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when called with the _count_ argument, a list of popped elements." 
+ ], + "RPOPLPUSH": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the element being popped and pushed.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the source list is empty." + ], + "RPUSH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "RPUSHX": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the list after the push operation." + ], + "SADD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements that were added to the set, not including all the elements already present in the set." + ], + "SAVE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): specifically, an array with two elements.", + "* The first element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) that represents an unsigned 64-bit number, the cursor.", + "* The second element is an [Array reply](/docs/reference/protocol-spec#arrays) with the names of scanned keys." + ], + "SCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The cardinality (number of elements) of the set, or 0 if the key does not exist." + ], + "SCRIPT": [], + "SCRIPT DEBUG": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SCRIPT EXISTS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): an array of integers that correspond to the specified SHA1 digest arguments." + ], + "SCRIPT FLUSH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SCRIPT HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "SCRIPT KILL": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." 
+ ], + "SCRIPT LOAD": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the SHA1 digest of the script added into the script cache." + ], + "SDIFF": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list with the members of the resulting set." + ], + "SDIFFSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting set." + ], + "SELECT": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SENTINEL CKQUORUM": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): Returns OK if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover." + ], + "SENTINEL CONFIG": [ + "One of the following:", + "* [Map reply](/docs/reference/protocol-spec#maps): When 'SENTINEL-CONFIG GET' is called, returns a map.", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. When 'SENTINEL-CONFIG SET' is called, returns OK on success." + ], + "SENTINEL DEBUG": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. The configuration update was successful.", + "* [Map reply](/docs/reference/protocol-spec#maps): List of configurable time parameters and their values (milliseconds)." + ], + "SENTINEL FAILOVER": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. Force a fail over as if the master was not reachable, and without asking for agreement to other Sentinels." + ], + "SENTINEL FLUSHCONFIG": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. Force Sentinel to rewrite its configuration on disk, including the current Sentinel state." + ], + "SENTINEL GET MASTER-ADDR-BY-NAME": [], + "SENTINEL HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): Helpful text about subcommands." 
+ ], + "SENTINEL INFO CACHE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): This is actually a map, the odd entries are a master name, and the even entries are the last cached INFO output from that master and all its replicas." + ], + "SENTINEL IS MASTER-DOWN-BY-ADDR": [], + "SENTINEL MASTER": [ + "[Map reply](/docs/reference/protocol-spec#maps): The state and info of the specified master." + ], + "SENTINEL MASTERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): List of monitored Redis masters, and their state." + ], + "SENTINEL MONITOR": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SENTINEL MYID": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): Node ID of the sentinel instance." + ], + "SENTINEL PENDING SCRIPTS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): List of pending scripts." + ], + "SENTINEL REMOVE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SENTINEL REPLICAS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): List of replicas for this master, and their state." + ], + "SENTINEL RESET": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The number of masters that were reset." + ], + "SENTINEL SENTINELS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): List of sentinel instances, and their state." + ], + "SENTINEL SET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SENTINEL SIMULATE FAILURE": [ + "One of the following:", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. The simulated flag was set.", + "* [Array reply](/docs/reference/protocol-spec#arrays): Supported simulation flags. Returned in case `HELP` was used." + ], + "SENTINEL SLAVES": [ + "[Array reply](/docs/reference/protocol-spec#arrays): List of monitored replicas, and their state."
+ ], + "SET": [ + "Any of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): `GET` not given: Operation was aborted (conflict with one of the `XX`/`NX` options).", + "* [Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`. `GET` not given: The key was set.", + "* [Null reply](/docs/reference/protocol-spec#nulls): `GET` given: The key didn't exist before the `SET`.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): `GET` given: The previous value of the key." + ], + "SETBIT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the original bit value stored at _offset_." + ], + "SETEX": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SETNX": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the key was not set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the key was set." + ], + "SETRANGE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the string after it was modified by the command." + ], + "SHUTDOWN": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK` if _ABORT_ was specified and shutdown was aborted. On successful shutdown, nothing is returned because the server quits and the connection is closed. On failure, an error is returned." + ], + "SINTER": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list with the members of the resulting set." + ], + "SINTERCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of the elements in the resulting intersection." + ], + "SINTERSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of the elements in the result set." 
+ ], + "SISMEMBER": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the element is not a member of the set, or when the key does not exist.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the element is a member of the set." + ], + "SLAVEOF": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SLOWLOG": [], + "SLOWLOG GET": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of slow log entries per the above format." + ], + "SLOWLOG HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "SLOWLOG LEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of entries in the slow log." + ], + "SLOWLOG RESET": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SMEMBERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): all members of the set." + ], + "SMISMEMBER": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list representing the membership of the given elements, in the same order as they are requested." + ], + "SMOVE": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): `1` if the element is moved.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `0` if the element is not a member of _source_ and no operation was performed." + ], + "SORT": [ + "[Array reply](/docs/reference/protocol-spec#arrays): without passing the _STORE_ option, the command returns a list of sorted elements.", + "[Integer reply](/docs/reference/protocol-spec#integers): when the _STORE_ option is specified, the command returns the number of sorted elements in the destination list." + ], + "SORT_RO": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sorted elements." 
+ ], + "SPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist.", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): when called without the _count_ argument, the removed member.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when called with the _count_ argument, a list of the removed members." + ], + "SPUBLISH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of clients that received the message. Note that in a Redis Cluster, only clients that are connected to the same node as the publishing client are included in the count" + ], + "SRANDMEMBER": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): without the additional _count_ argument, the command returns a randomly selected member, or a [Null reply](/docs/reference/protocol-spec#nulls) when _key_ doesn't exist.", + "* [Array reply](/docs/reference/protocol-spec#arrays): when the optional _count_ argument is passed, the command returns an array of members, or an empty array when _key_ doesn't exist." + ], + "SREM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): Number of members that were removed from the set, not including non existing members." + ], + "SSCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): specifically, an array with two elements:", + "* The first element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) that represents an unsigned 64-bit number, the cursor.", + "* The second element is an [Array reply](/docs/reference/protocol-spec#arrays) with the names of scanned members." + ], + "SSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each shard channel, one message with the first element being the string 'ssubscribe' is pushed as a confirmation that the command succeeded. Note that this command can also return a -MOVED redirect." 
+ ], + "STRLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the length of the string stored at key, or 0 when the key does not exist." + ], + "SUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each channel, one message with the first element being the string `subscribe` is pushed as a confirmation that the command succeeded." + ], + "SUBSTR": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): the substring of the string value stored at key, determined by the offsets start and end (both are inclusive)." + ], + "SUNION": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list with the members of the resulting set." + ], + "SUNIONSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): Number of the elements in the resulting set." + ], + "SUNSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each shard channel, one message with the first element being the string `sunsubscribe` is pushed as a confirmation that the command succeeded." + ], + "SWAPDB": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "SYNC": [ + "**Non-standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master." + ], + "TIME": [ + "[Array reply](/docs/reference/protocol-spec#arrays): specifically, a two-element array consisting of the Unix timestamp in seconds and the microseconds' count." + ], + "TOUCH": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of touched keys." + ], + "TTL": [ + "One of the following:", + "* [Integer reply](/docs/reference/protocol-spec#integers): TTL in seconds.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-1` if the key exists but has no associated expiration.", + "* [Integer reply](/docs/reference/protocol-spec#integers): `-2` if the key does not exist." 
+ ], + "TYPE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): the type of _key_, or `none` when _key_ doesn't exist." + ], + "UNLINK": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of keys that were unlinked." + ], + "UNSUBSCRIBE": [ + "When successful, this command doesn't return anything. Instead, for each channel, one message with the first element being the string `unsubscribe` is pushed as a confirmation that the command succeeded." + ], + "UNWATCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "WAIT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of replicas reached by all the writes performed in the context of the current connection." + ], + "WAITAOF": [ + "[Array reply](/docs/reference/protocol-spec#arrays): The command returns an array of two integers:", + "1. The first is the number of local Redises (0 or 1) that have fsynced to AOF all writes performed in the context of the current connection", + "2. The second is the number of replicas that have acknowledged doing the same." + ], + "WATCH": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "XACK": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The command returns the number of messages successfully acknowledged. Certain message IDs may no longer be part of the PEL (for example because they have already been acknowledged), and XACK will not count them as successfully acknowledged." + ], + "XADD": [ + "One of the following:", + "* [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The ID of the added entry. The ID is the one automatically generated if an asterisk (`*`) is passed as the _id_ argument, otherwise the command just returns the same ID specified by the user during insertion.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the NOMKSTREAM option is given and the key doesn't exist." 
+ ], + "XAUTOCLAIM": [ + "[Array reply](/docs/reference/protocol-spec#arrays), specifically, an array with three elements:", + "1. A stream ID to be used as the _start_ argument for the next call to XAUTOCLAIM.", + "2. An [Array reply](/docs/reference/protocol-spec#arrays) containing all the successfully claimed messages in the same format as `XRANGE`.", + "3. An [Array reply](/docs/reference/protocol-spec#arrays) containing message IDs that no longer exist in the stream, and were deleted from the PEL in which they were found." + ], + "XCLAIM": [ + "Any of the following:", + "* [Array reply](/docs/reference/protocol-spec#arrays): when the _JUSTID_ option is specified, an array of IDs of messages successfully claimed.", + "* [Array reply](/docs/reference/protocol-spec#arrays): an array of stream entries, each of which contains an array of two elements, the entry ID and the entry data itself." + ], + "XDEL": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of entries that were deleted." + ], + "XGROUP": [], + "XGROUP CREATE": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "XGROUP CREATECONSUMER": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of created consumers, either 0 or 1." + ], + "XGROUP DELCONSUMER": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of pending messages the consumer had before it was deleted." + ], + "XGROUP DESTROY": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of destroyed consumer groups, either 0 or 1." + ], + "XGROUP HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "XGROUP SETID": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "XINFO": [], + "XINFO CONSUMERS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of consumers and their attributes." 
+ ], + "XINFO GROUPS": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of consumer groups." + ], + "XINFO HELP": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of sub-commands and their descriptions." + ], + "XINFO STREAM": [ + "One of the following:", + "* [Map reply](/docs/reference/protocol-spec#maps): when the _FULL_ argument was not given, a list of information about a stream in summary form.", + "* [Map reply](/docs/reference/protocol-spec#maps): when the _FULL_ argument was given, a list of information about a stream in extended form." + ], + "XLEN": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of entries of the stream at _key_." + ], + "XPENDING": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): different data depending on the way XPENDING is called, as explained on this page." + ], + "XRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of stream entries with IDs matching the specified range." + ], + "XREAD": [ + "One of the following:", + "* [Map reply](/docs/reference/protocol-spec#maps): A map of key-value elements where each element is composed of the key name and the entries reported for that key. The entries reported are full stream entries, having IDs and the list of all the fields and values. Field and values are guaranteed to be reported in the same order they were added by `XADD`.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the _BLOCK_ option is given and a timeout occurs, or if there is no stream that can be served." + ], + "XREADGROUP": [ + "One of the following:", + "* [Map reply](/docs/reference/protocol-spec#maps): A map of key-value elements where each element is composed of the key name and the entries reported for that key. The entries reported are full stream entries, having IDs and the list of all the fields and values. 
Field and values are guaranteed to be reported in the same order they were added by `XADD`.", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the _BLOCK_ option is given and a timeout occurs, or if there is no stream that can be served." + ], + "XREVRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): The command returns the entries with IDs matching the specified range. The returned entries are complete, which means that the ID and all the fields they are composed of are returned. Moreover, the entries are returned with their fields and values in the same order as `XADD` added them." + ], + "XSETID": [ + "[Simple string reply](/docs/reference/protocol-spec#simple-strings): `OK`." + ], + "XTRIM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): The number of entries deleted from the stream." + ], + "ZADD": [ + "Any of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the operation was aborted because of a conflict with one of the _XX/NX/LT/GT_ options.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the number of new members when the _CH_ option is not used.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the number of new or updated members when the _CH_ option is used.", + "* [Double reply](/docs/reference/protocol-spec#doubles): the updated score of the member when the _INCR_ option is used." + ], + "ZCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the cardinality (number of members) of the sorted set, or 0 if the key doesn't exist." + ], + "ZCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the specified score range." + ], + "ZDIFF": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): the result of the difference including, optionally, scores when the _WITHSCORES_ option is used." 
+ ], + "ZDIFFSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the resulting sorted set at _destination_." + ], + "ZINCRBY": [ + "[Double reply](/docs/reference/protocol-spec#doubles): the new score of _member_." + ], + "ZINTER": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): the result of the intersection including, optionally, scores when the _WITHSCORES_ option is used." + ], + "ZINTERCARD": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the resulting intersection." + ], + "ZINTERSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the resulting sorted set at the _destination_." + ], + "ZLEXCOUNT": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members in the specified score range." + ], + "ZMPOP": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): when no element could be popped.", + "* [Array reply](/docs/reference/protocol-spec#arrays): A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score." + ], + "ZMSCORE": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the member does not exist in the sorted set.", + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of [Double reply](/docs/reference/protocol-spec#doubles) _member_ scores as double-precision floating point numbers." + ], + "ZPOPMAX": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of popped elements and scores." + ], + "ZPOPMIN": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of popped elements and scores." 
+ ], + "ZRANDMEMBER": [ + "[Bulk string reply](/docs/reference/protocol-spec#bulk-strings): without the additional _count_ argument, the command returns a randomly selected member, or [Null reply](/docs/reference/protocol-spec#nulls) when _key_ doesn't exist.", + "[Array reply](/docs/reference/protocol-spec#arrays): when the additional _count_ argument is passed, the command returns an array of members, or an empty array when _key_ doesn't exist. If the _WITHSCORES_ modifier is used, the reply is a list of members and their scores from the sorted set." + ], + "ZRANGE": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of members in the specified range with, optionally, their scores when the _WITHSCORES_ option is given." + ], + "ZRANGEBYLEX": [ + "[Array reply](/docs/reference/protocol-spec#arrays): a list of elements in the specified score range." + ], + "ZRANGEBYSCORE": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of the members with, optionally, their scores in the specified score range." + ], + "ZRANGESTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting sorted set." + ], + "ZRANK": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist or the member does not exist in the sorted set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): the rank of the member when _WITHSCORE_ is not used.", + "* [Array reply](/docs/reference/protocol-spec#arrays): the rank and score of the member when _WITHSCORE_ is used." + ], + "ZREM": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of members removed from the sorted set, not including non-existing members." + ], + "ZREMRANGEBYLEX": [ + "[Integer reply](/docs/reference/protocol-spec#integers): Number of members removed." + ], + "ZREMRANGEBYRANK": [ + "[Integer reply](/docs/reference/protocol-spec#integers): Number of members removed." 
+ ], + "ZREMRANGEBYSCORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): Number of members removed." + ], + "ZREVRANGE": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of members in the specified range, optionally with their scores if _WITHSCORE_ was used." + ], + "ZREVRANGEBYLEX": [ + "[Array reply](/docs/reference/protocol-spec#arrays): List of the elements in the specified score range." + ], + "ZREVRANGEBYSCORE": [ + "* [Array reply](/docs/reference/protocol-spec#arrays): a list of the members and, optionally, their scores in the specified score range." + ], + "ZREVRANK": [ + "One of the following:", + "* [Null reply](/docs/reference/protocol-spec#nulls): if the key does not exist or the member does not exist in the sorted set.", + "* [Integer reply](/docs/reference/protocol-spec#integers): The rank of the member when _WITHSCORE_ is not used.", + "* [Array reply](/docs/reference/protocol-spec#arrays): The rank and score of the member when _WITHSCORE_ is used." + ], + "ZSCAN": [ + "[Array reply](/docs/reference/protocol-spec#arrays): cursor and scan response in array form." + ], + "ZSCORE": [ + "One of the following:", + "* [Double reply](/docs/reference/protocol-spec#doubles): the score of the member (a double-precision floating point number).", + "* [Nil reply](/docs/reference/protocol-spec#bulk-strings): if _member_ does not exist in the sorted set, or the key does not exist." + ], + "ZUNION": [ + "[Array reply](/docs/reference/protocol-spec#arrays): the result of the union with, optionally, their scores when _WITHSCORES_ is used." + ], + "ZUNIONSTORE": [ + "[Integer reply](/docs/reference/protocol-spec#integers): the number of elements in the resulting sorted set." 
+ ] +} diff --git a/tool_types.json b/tool_types.json index 98657bac17..d56d04c4cc 100644 --- a/tool_types.json +++ b/tool_types.json @@ -1,5 +1,6 @@ { "cli": "CLI", "gui": "GUI", - "proxy": "Proxy" + "proxy": "Proxy", + "other": "Other" } diff --git a/tools/gui/github.com/qishibo/AnotherRedisDesktopManager.json b/tools/gui/github.com/qishibo/AnotherRedisDesktopManager.json new file mode 100644 index 0000000000..28b7514e10 --- /dev/null +++ b/tools/gui/github.com/qishibo/AnotherRedisDesktopManager.json @@ -0,0 +1,6 @@ +{ + "name": "Another Redis Desktop Manager", + "description": "🚀🚀🚀A faster, better and more stable Redis desktop manager [GUI client], compatible with Linux, Windows, Mac.", + "homepage": "https://github.com/qishibo/AnotherRedisDesktopManager", + "twitter": ["qii404"] +} diff --git a/tools/other/github.com/IBM/operator-for-redis-cluster.json b/tools/other/github.com/IBM/operator-for-redis-cluster.json new file mode 100644 index 0000000000..dc93eb7c56 --- /dev/null +++ b/tools/other/github.com/IBM/operator-for-redis-cluster.json @@ -0,0 +1,5 @@ +{ + "name": "IBM Operator for Redis Cluster", + "description": "The goal of this project is to simplify the deployment and management of a Redis cluster in a Kubernetes environment. It started internally at Amadeus in 2016, where it was initially designed to run on Openshift. 
This is the third version of the Redis operator, which leverages the Operator SDK framework for operators.", + "homepage": "https://ibm.github.io/operator-for-redis-cluster" +} diff --git a/tools/other/github.com/alibaba/RedisShake.json b/tools/other/github.com/alibaba/RedisShake.json new file mode 100644 index 0000000000..2bfa7d5cb0 --- /dev/null +++ b/tools/other/github.com/alibaba/RedisShake.json @@ -0,0 +1,4 @@ +{ + "name": "redis-shake", + "description": "redis-shake is a tool for Redis data migration and data filtering." +} diff --git a/tools/other/github.com/leonchen83/redis-rdb-cli.json b/tools/other/github.com/leonchen83/redis-rdb-cli.json new file mode 100644 index 0000000000..659cace58b --- /dev/null +++ b/tools/other/github.com/leonchen83/redis-rdb-cli.json @@ -0,0 +1,4 @@ +{ + "name": "redis-rdb-cli", + "description": "A tool that can parse, filter, split, merge rdb and analyze memory usage offline. It can also sync data between 2 Redis instances and allows users to define their own sink service to migrate Redis data elsewhere." +} diff --git a/tools/other/github.com/oliver006/redis_exporter.json b/tools/other/github.com/oliver006/redis_exporter.json new file mode 100644 index 0000000000..78c3e4b0bc --- /dev/null +++ b/tools/other/github.com/oliver006/redis_exporter.json @@ -0,0 +1,4 @@ +{ + "name": "Prometheus Redis Metrics Exporter", + "description": "Prometheus exporter for Redis metrics." +} diff --git a/tools/other/github.com/redis-developer/riot.json b/tools/other/github.com/redis-developer/riot.json new file mode 100644 index 0000000000..c4a7d20d61 --- /dev/null +++ b/tools/other/github.com/redis-developer/riot.json @@ -0,0 +1,4 @@ +{ + "name": "RIOT", + "description": "Redis Input/Output Tools (RIOT) is a series of utilities designed to help you get data in and out of Redis."
+} diff --git a/tools/other/github.com/redis/librdb.json b/tools/other/github.com/redis/librdb.json new file mode 100644 index 0000000000..23818714d8 --- /dev/null +++ b/tools/other/github.com/redis/librdb.json @@ -0,0 +1,5 @@ +{ + "name": "librdb", + "description": "Redis RDB file parser, with JSON, RESP and RDB-loader extensions", + "recommended": true +} diff --git a/tools/other/github.com/spotahome/redis-operator.json b/tools/other/github.com/spotahome/redis-operator.json new file mode 100644 index 0000000000..f488e778f1 --- /dev/null +++ b/tools/other/github.com/spotahome/redis-operator.json @@ -0,0 +1,4 @@ +{ + "name": "Redis Operator", + "description": "Redis Operator creates/configures/manages high availability Redis with Sentinel automatic failover atop Kubernetes." +} diff --git a/wordlist b/wordlist index a9aabaea5a..d222fcd152 100644 --- a/wordlist +++ b/wordlist @@ -1,9 +1,14 @@ +3:00 AM +4:00 AM +5:00 AM +6:00 AM .rdb +ˈrɛd-ɪs 0s 0x00060007 0x00MMmmpp -100MB 100k +100MB 10GB 10k 128MB @@ -12,24 +17,24 @@ 1s 1th 2GB +3am 300ms 30ms -32MB 32bit -3GB -3MB -3am +32MB 3c3a0c 3c3a0c74aae0b56170ccb03a76b60cfe7dc1912e -4GB +3GB +3MB 4am +4GB 4k +5am 500MB 512MB 5GB -5am -60s 6am +60s 6sync 80ms 85MB @@ -38,384 +43,64 @@ 90s 97a3a64667477371c4479320d683e4c8db5858b1 A1 +acknowledgement ACKs +acl +acl-pubsub-default ACLs -AMD64 -AOF -AOFRW -AOF_START -APIs -ARGV -ASN -AUTOID +ad-hoc Aioredlock Alibaba -Arity -Async -Asyncio -Atomicvar -Auth -B1 -B2 -B3 -BCC's -BDFL-style -BPF -BPF's -BPF-optimized -Benchmarking -BigNumber -BitOp -Bitfields -C1 -C2 -C3 -C4 -C5 -CAS -CAs -CFIELD -CKQUORUM -CLI -CLI's -CP -CPUs -CRC -CRC-16 -CRC16 -CRC64 -CRDTs -CRLF -CRLF-terminated -CSV -CallReply -CentOS -Changelog -Chemeris -Citrusbyte -CloseKey -Cn -Collina's -Config -ContextFlags -Costin -Craigslist -Ctrl-a -DBs -DEL'ition -DLM -DMA -DNS -DSL -Deauthenticate -Deauthenticates -Defrag -Deno 
-Diskless -DistLock -Dynomite -EBADF -EBS -EC2 -EDOM -EEXIST -EFBIG -EINVAL -ENOENT -ENOTSUP -EOF -EP -EPSG:3785 -EPSG:900913 -ERANGE -Enum -Eval -EventLoop -EventMachine -FLUSHCONFIG -Failover -Failover-based -Failovers -FlameGraph -FreeBSD -FreeString -Fsyncing -GDB -GEODEL -GET-MASTER-ADDR-BY-NAME -GPG -Gbit -GeoHashes -Geohash -Geohashes -Geospatial -Github -Gottlieb -HLL -HLLs -HMAC-SHA256 -HVM -HW -Hardcoded -Haversine -Hexastore -Hitmeister -Homebrew -Hotspot -HyperLogLog -HyperLogLogs -IOPs -IPC -IPs -IPv4 -IPv6 -IS-MASTER-DOWN-BY-ADDR -Identinal -IoT -Itamar -JSON -JSON-encoded -Janowski -Jemalloc -KEYSPACE -Keyspace -KeyspaceNotification -Kleppmann -L3 -LDB -LF -LFU -LHF -LLOOGG -LRU -LRU's -LRU. -LUA -Leau -Lehmann -Levelgraph -LibLZF -Linode -Liveness -Lua -Lua's -Lua-to-Redis -Lucraft -M1 -MASTERDOWN -MERCHANTABILITY -MacBook -Matteo -Maxmemory -Memcache -MessagePack -Movablekeys -Mrkris -NAS -NATted -NFS -NIC -NICs -NOOP -NUMA -NaN -Nehalem -NoSQL -NodeJS -nonprintable -Noordhuis -NullArray -ODOWN -OOM -OR-ing -ORed -OSGEO:41001 -Ok -OpenBSD -OpenSSL -Opteron -PEL -PEM -PFAIL -PHPRedisMutex -PID -PMCs -PMU -POP3 -POSIX -POV -PRNG -PV -Parameterization -Pieter -Pipelining -Predis -Prev -Prioglio -Programm -Programmability -PubSub-related -Pubsub -Pubsub. -R1 -R2 -RC1 -RC3 -RC4 -RDB -RDB-saving -REDISMODULE_OPTIONS_HANDLE_REPL_ASYNC_LOAD -REDISPORT -REPL -RESP2 -RESP2. -RESP3 -RESP3's -RESP3-typed -RESP3. -REdis -RM_.* -RPC -RSS -RTT -RW -Rebranding -Reconfiguring -Reddit's -Redimension -Redis-rb -Redis-to-Lua -RedisCallReply -RedisConf -RedisHost. 
-RedisModule.* -Redisson -Redistributions -Redlock -Redlock-cpp -Redlock-cs -Redlock-php -Redlock-py -Redlock-rb -Redlock4Net -Redsync -Reshard -Resharding -Resque -RetainString -Retwis -Retwis-J -Retwis-RB -Roshi -Rx/Tx -S1 -S2 -S3 -S4 -SCP -SDOWN -SHA-256 -SHA1 -SHA256 -SIGBUS -SIGFPE -SIGILL -SIGINT -SIGSEGV -SIGTERM -SSD -SSL -SVGs -SYNC_RDB_START -Sandboxed -Sanfilippo -Sanfilippo's -ScarletLock -Selectable -Sharded -Shuttleworth -Slicehost -SmartOS -Snapchat -Snapcraft -Snapshotting -Solaris-derived -SomeOtherValue -SoundCloud -StackOverflow -StringDMA -Subcommands -T1 -T2 -TCL -TCP -TLS -TTLs -Tthe -Twemproxy -URI -USD -UTF-8 -Unmodifiable -Unregister -Untrusted -Unwatches -VM -VMs -VMware -VPS -ValueN -Variadic -Virtualized -Vladev -WSL -WSL2 -Westmere -XMODEM -XSCAN -XYZ -Xen -Xen-specific -Xeon -YCSB -Yossi -Z1 -ZMODEM -ZPOP -ZSET -ZeroBrane -Zhao -acknowledgement -acl -ad-hoc alice +allchannels allkeys allkeys-lru allkeys-random allocator allocator's allocators +AMD64 analytics antirez +antirez's +AOF aof +AOF_START aof-1 aof-2 aof-N +AOFRW api apiname +APIs appendfsync appendonly applicative args +ARGV argv argvs +Arity arity +ASN +Async async +Asyncio atomicity +Atomicvar +Auth auth authenticateClientWithUser auto-reconnection autocomplete +AUTOID +autoload +autoloader +autoloading +Autoloading +B1 +B2 +B3 backend backported backslashed @@ -425,43 +110,104 @@ balancer bazzar bc bcc +BCC's bcc's +BDFL-style +beforeSleep behaviour benchmarked +Benchmarking benchmarking big-endian +BigNumber +bikes:racing:france +bikes:racing:italy +bikes:racing:usa +bikes:rentable +birthyear +bitfield bitfield +Bitfields +bitfields +BitOp bitop +Bitwise +bitwise bitwise bool +Booleans +booleans +BPF +BPF-optimized +BPF's breakpoint broadcasted +brpop bt btree1-az +C1 +C2 +C3 +C4 +C5 +CallReply cancelled cardinalities cardinality +CAS +CAs casted +Castilla cd +CentOS +CFIELD +Changelog changelogs charset +Chemeris cheprasov +Citrusbyte cjson +CKQUORUM cleartext +CLI cli 
+CLI's +CloseKey cluster-config-file +Cmd cmsgpack +Cn codename codenamed +Collina's commandstats commnad +CONFIG +Config config +config-file configEpoch configs const +ContextFlags +Costin +CP cpu +cpu-profiling +CPUs +Craigslist +CRC +CRC-16 +CRC16 +CRC64 +CRDTs +CRLF +CRLF-terminated cron cryptographic +CSV +Ctrl-a ctx daemonize daemonized @@ -469,14 +215,18 @@ daemontools dataset datastore dbid +DBs de de-serialization de-serialize deallocated dearmor +Deauthenticate deauthenticate deauthenticated +Deauthenticates deduplicated +Defrag defrag defragging defragment @@ -484,6 +234,7 @@ defragmentable defragmentation defragmented del +Deno deny-oom deserialize deserialized @@ -492,11 +243,27 @@ desync desynchronize dev dir +Diskless diskless +DistLock distlock +DLM +DMA +dnf +DNF +DNS +DSL dup-sentinel -eBPF +Dynomite earts +EBADF +eBPF +EBS +EC2 +EDOM +EEXIST +EFBIG +EINVAL ele emented enable-protected-configs @@ -509,24 +276,44 @@ end-slot2 end-slotN endian endianness +Enduro +ENOENT +ENOTSUP +Enum enum enum_val enum_vals +enums +EOF +EP +EPEL epel-release epoll +EPSG:3785 +EPSG:900913 +ERANGE +Ergonom errno error1 error2 errorstats ethernet +Eval eval eval-intro +EventLoop +eventloop +EventMachine everysec executables expiries +explainer +explainers facto factorializing +Failover failover +Failover-based failover-detected failover-end failover-end-for-timeout @@ -534,9 +321,11 @@ failover-state-reconf-slaves failover-state-select-slave failover-state-send-slaveof-noone failover. 
+Failovers failovers fanout faq +favorited fdatasync filesystem firewalled @@ -544,38 +333,100 @@ firewalling first-arg first-args firstkey +FlameGraph +FLUSHCONFIG fmt foo0 foo1 foo2 formatter +france_location +FreeBSD +FreeString +fractionals frequencyonly fsSL fsync +fsynced +Fsyncing fsyncing +fsyncs +func +Gbit +GDB gdb geo +geo_tutorial +geoadd +GEODEL +Geohash geohash geohash-encoded +GeoHashes +Geohashes +geosearch +Geospatial geospatial +GET-MASTER-ADDR-BY-NAME getkeys-api +Github github globals +go-redis +Gottlieb +GPG gpg +Gradle +Hacktoberfest +hacktoberfest handleClientsWithPendingWrites +Hardcoded hardcoded hardlinks +HashMap +HashMaps +HashSet +HashSets +Haversine +Healthcheck +healthchecks +Henshaw +Hexastore hexastore +hget +hgetall +hincrby hiredis +Hitmeister +HLL +HLLs +HMAC-SHA256 +hmget +holdApplicationUntilProxyStarts +Homebrew hostname hostnames +Hotspot hotspots +hset +HVM +HW +HyperLogLog hyperloglog +HyperLogLog. +HyperLogLogs +Hyperloglogs +hyperloglogs i8 iamonds +IANA +Identinal idletime idx idx'-th +incr +incrby +incrby_get_mget indexable ing init @@ -587,46 +438,99 @@ internals-vm intsets invalidations iojob +IOPs iostat +IoT ip ip:port +IPC +IPs +IPv4 +IPv6 +IS-MASTER-DOWN-BY-ADDR +Istio +italy_racers +Itamar iterable +iteratively +ition +Janowski +Javadocs +JDK +Jedis +jedis +JedisCluster +jedisClusterNodes +JedisPool +JedisPooled +Jemalloc jemalloc +JKS jpeg +js +JSON +JSON-encoded +JUSTID kB keepalive -keyN keylen +keyN keyname keynum keynumidx keyrings +KEYSPACE +Keyspace keyspace keyspace-notifications +KeyspaceNotification keyspec keystep +keytool +Kleppman's +Kleppmann knockknock kqueue +L3 last-failover-wins -lastVoteEpoch lastkey +lastVoteEpoch late-defrag latencies latencystats launchd lazyfree-lazy-user-flush +LDB ldb leaderboard +Leaderboards +leaderboards +Leau +Lehmann len lenptr +Levelgraph lexicographically +LF +LFU +LHF libc +LibLZF libssl-dev +licensor +licensor's linenoise linkTitle +Linode little-endian +Liveness 
liveness +llen +LLOOGG +lmove_lrange +lmove_lrange ln +LoadX509KeyPair localhost log2 logfile @@ -634,105 +538,216 @@ logics loglevel lookups loopback +lpop +lpop_rpop lpush +lpush_rpush +lrange +LRU lru_cache +LRU. +LRU's lsb_release +lsb-release +ltrim +ltrim_end_of_list +LUA +Lua lua-api +lua-debugging lua-replicate-commands +Lua-to-Redis +Lua's lubs +Lucraft +M1 +MacBook macOS macroscopically malloc +MASTERDOWN +Matteo matteocollina +maxlen +MAXLEN +Maxmemory maxmemory +Memcache memcached memset memtest86 memtier_benchmark +MERCHANTABILITY +MessagePack metatag +mget middleware +MINID miranda misconfiguration misconfigured -moduleType +MKSTREAM modules-api-ref +moduleType +Movablekeys movablekeys +Mrkris +mset +multisets mutex mylist mymaster +mystream myuser myzset namespace namespacing +NaN +NAS natively +NATted +Nehalem netcat newjobs +NFS +NIC +NICs nils no-appendfsync-on-rewrite +node-redis node-redlock +NodeJS noeviction +non-reachability non-TCP non-TLS +non-loopback non-reachability non-virtualized +nonprintable +NOOP +Noordhuis nopass +Norem +NoSQL notify-keyspace-events notifyKeyspaceEvent +NRedisStack +NTP +NullArray +nullarray num-items +NUMA numactl numkeys +NX +nx observability +ODOWN +octothorpe odown +Ok ok oldval oneof onwards +OOM +OpenBSD +OpenSSL +openssl +Opteron optionals +OR-ing +ORed +ORM +OSGEO:41001 overcommit p50 p999 +Packagist pades pageview +Parameterization parameterization parametrize params parsable +PEL +PELs +PEM perf perf_events performance-on-cpu +PFAIL php-redis-lock +PHPRedisMutex +PID pid pidfile +Pieter pipelined +Pipelining pipelining +pkcs12 +PMCs pmessage +PMU +Pool2 +POP3 +POSIX +POV ppa:redislabs -pratical pre-conditions +pre-configured pre-existing pre-imported pre-loaded pre-populated pre-sharding +Predis prepend +Prepend preprocessing prerequesits +Prev prev +Prickett printf printf-alike +Prioglio privdata +PRNG probabilistically proc +Programm +Programmability programmability programmatically 
programmatically-generated pseudorandom +PSR-4 +Pubsub pubsub +PubSub-related +Pubsub. +PV +Pydantic qsort queueing +quickstarts +R1 +R2 +race:france +race:italy +race:usa radix rc +RC1 +RC3 +RC4 +RDB rdb-preamble +RDB-saving rdd rdd-1 rdd-2 @@ -747,26 +762,58 @@ realtime reauthenticate rebalance rebalancing +Rebranding reconfigurations reconfigures +Reconfiguring reconfiguring reconnection reconnections +Reddit's +Redimension redirections +REdis redis redis-benchmark redis-check-aof redis-cli +redis-clients redis-doc redis-hashes redis-lua redis-macOS-demo +redis-om-python +redis-om-python. +redis-py +Redis-rb redis-rb-cluster redis-server redis-stable +Redis-to-Lua +RedisCallReply +RedisConf +RediSearch +Redises +RedisHost. +RedisInsight +RedisJSON +REDISMODULE_OPTIONS_HANDLE_REPL_ASYNC_LOAD +RedisModule.* redisObjectVM +REDISPORT +Redisson +Redistributions +Redlock +Redlock-cpp +Redlock-cs +Redlock-php +Redlock-py +Redlock-rb +Redlock4Net +Redsync registerAPI reimplements +REPL repl-diskless-load repo representable @@ -778,54 +825,129 @@ rescanned reseek resends resetpass +Reshard reshard resharded +Resharding resharding reshardings +RESP2 +RESP2. +RESP3 +RESP3-typed +RESP3. 
+RESP3's +Resque resque resync resynchronization resynchronizations resyncs +RetainString retcode returing +Retwis +Retwis-J +Retwis-RB +RHEL +RM_.* +RM_CreateCommand +RM_CreateStringFromString +RM_IsKeysPositionRequest +RM_KeyAtPosWithFlags +RM_SetCommandInfo roadmap robj +Roshi roundtrips +RPC rpc-perf rpop +rpush +Rslock +RSS rss rtckit +RTT +RU101 +RU102 +RU202 runid runlevels +RW +Rx/Tx +S1 +S2 +S3 +S4 +SaaS +sadd +sadd_smembers +Sam-Bodden +Sandboxed sandboxed +Sanfilippo +Sanfilippo's scalable +scard +ScarletLock +SCP +sdiff +SDOWN sdown sds se seeked +Selectable semantical serverCron +setnx_xx +SHA-256 +SHA1 +SHA256 +Sharded sharded sharding +Shuttleworth si sidekiq +SIGBUS +SIGFPE +SIGILL +SIGINT signle +SIGSEGV +SIGTERM +sinter +sismember slave-reconf-done slave-reconf-inprog slave-reconf-sent +Slicehost slot1 slowlog smaps +SmartOS +smismember +Snapchat +Snapcraft snapd +Snapshotting snapshotting +Solaris-derived somekey -specfic +SomeOtherValue +Sonatype +SoundCloud spectrogram spellchecker-cli spiped spo sponsorships +srem +SSD +SSL +StackOverflow start-slot1 start-slot2 start-slotN @@ -833,13 +955,19 @@ startfrom status1 status2 stdin +storepass strace +stream_toturial +StringDMA struct -struct's struct-encoded +struct's +structs +stunnel subcommand -subcommand's subcommand. 
+subcommand's +Subcommands subcommands subevent subevents @@ -847,25 +975,45 @@ suboptimal subsequence substring sudo +SVGs +superset swapdb swappability +SYNC_RDB_START syncd syscall systemctl +T1 +T2 taskset +TCL tcmalloc +TCP +tcp +the-redis-keyspace +TLS +TLS-enabled tls-port tmp tmux toolkits topologies +tonumber tradeoff tradeoffs transactional try-failover +Tthe +TTL +TTLs tty tunable +Twemproxy typemethods_ptr +UI +ULID +ULIds +ULIDs un-authenticated un-gated unclaimable @@ -877,39 +1025,122 @@ unix unlink unlinked unlinks +Unmodifiable unmodifiable unpause unreachability +Unregister unregister unregisters +Untrusted untrusted untuned +Unwatches unwatches +uppercased urandom +URI +USD used_memory_scripts_eval +userSession usr +UTF-8 utf8 utils +v9 value-ptr +ValueN +Variadic variadic +venv virginia +Virtualized virtualized +Vladev +VM vm vm-max-memory -vmSwapOneObject +VMs vmstat +vmSwapOneObject +VMware volatile-lru volatile-ttl +VPS vtype +WAITAOF +Westmere wget wherefrom whitespace whitespaces whos-using-redis +WRONGTYPE +WSL +WSL2 +xack +XACK +xadd +XADD +xadd_2 +xadd_7 +xadd_bad_id +xadd_id +xautoclaim +XAUTOCLAIM +xautoclaim_cursor +xclaim +XCLAIM +xdel +XDEL +Xen +Xen-specific +Xeon xff +XGROUP +xgroup_create +xgroup_create_mkstream +xgroup_read +xgroup_read_bob +xinfo +XINFO +xinfo_consumers +xinfo_groups +xlen +XLEN +XMODEM +xpending +XPENDING +xpending_plus_minus +xrange +XRANGE +xrange_all +xrange_empty +xrange_pending +xrange_step_1 +xrange_step_2 +xrange_time +xread +XREAD +XREADGROUP +xgroup_read_id +xread_block +xrevrange +XREVRANGE +XSCAN +xtrim +XTRIM +xtrim2 +XYZ xzvf +YCSB +Yossi +Z1 +ZeroBrane zeroed-ACLs +Zhao ziplists +ZMODEM +ZPOP +ZSET zset -ˈrɛd-ɪs -enums