From 98438f66a3b71bb208b5455fb2bb3105bd236695 Mon Sep 17 00:00:00 2001 From: Sandeep Shetty Date: Tue, 16 Apr 2013 17:37:09 +0530 Subject: [PATCH 0001/2314] Added phpish/redis --- clients.json | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 327a74c93c..33b7e4da66 100644 --- a/clients.json +++ b/clients.json @@ -362,7 +362,15 @@ "description": "Lightweight, standalone, unit-tested fork of Redisent which wraps phpredis for best performance if available.", "authors": ["colinmollenhour"] }, - + + { + "name": "phpish/redis", + "language": "PHP", + "repository": "https://github.com/phpish/redis", + "description": "Simple Redis client in PHP", + "authors": ["sandeepshetty"] + }, + { "name": "redis-py", "language": "Python", From 14cc15a04a74c63864b1c96fc83c8936cb4dc04d Mon Sep 17 00:00:00 2001 From: BB Date: Sat, 18 May 2013 08:43:46 +0200 Subject: [PATCH 0002/2314] Added Redis client for Rebol. --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 327a74c93c..c0caf92da5 100644 --- a/clients.json +++ b/clients.json @@ -399,6 +399,14 @@ "active": true }, + { + "name": "prot-redis", + "language": "Rebol", + "repository": "https://github.com/rebolek/prot-redis", + "description": "Redis network scheme for Rebol 3", + "authors": ["rebolek"] + }, + { "name": "scala-redis", "language": "Scala", From 9db568250965da21e47258e1292757414c7f556b Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 4 Jul 2013 14:56:29 -0600 Subject: [PATCH 0003/2314] Swapped MojoX::Redis for Mojo::Redis MojoX::Redis is deprecated in favor of Mojo::Redis --- clients.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/clients.json b/clients.json index 327a74c93c..53b6642e31 100644 --- a/clients.json +++ b/clients.json @@ -291,12 +291,12 @@ }, { - "name": "MojoX::Redis", + "name": "Mojo::Redis", "language": "Perl", - "url": 
"http://search.cpan.org/dist/MojoX-Redis", - "repository": "https://github.com/und3f/mojox-redis", + "url": "http://search.cpan.org/dist/Mojo-Redis", + "repository": "https://github.com/marcusramberg/mojo-redis", "description": "asynchronous Redis client for Mojolicious", - "authors": ["und3f"], + "authors": ["und3f", "marcusramberg", "jhthorsen"], "active": true }, From 7a87240ed0e105906d7005874df0e9142f2aafb2 Mon Sep 17 00:00:00 2001 From: Philipp Klose Date: Fri, 26 Jul 2013 02:16:15 +0200 Subject: [PATCH 0004/2314] Haxe was renamed Haxe was renamed. From "haXe" to "Haxe". --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 327a74c93c..1a7d9e6d33 100644 --- a/clients.json +++ b/clients.json @@ -489,7 +489,7 @@ { "name": "hxneko-redis", - "language": "haXe", + "language": "Haxe", "url": "http://code.google.com/p/hxneko-redis", "repository": "http://code.google.com/p/hxneko-redis/source/browse", "description": "", From 4424e5354cedd12057d33a809556396ac7bc643b Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:17:40 +0200 Subject: [PATCH 0005/2314] The redhatvm cited article have a known bug The "Understanding Virtual Memory" article cited when motivating the setting for overcommit_memory had the meaning of the values 1 and 2 reversed. I found it while reading this comment http://superuser.com/a/200504. With this commit, I'm trying to make this known to the Redis FAQ reader. The proc(5) man page has it pretty clear: /proc/sys/vm/overcommit_memory This file contains the kernel virtual memory accounting mode. Values are: 0: heuristic overcommit (this is the default) 1: always overcommit, never check 2: always check, never overcommit In mode 0, calls of mmap(2) with MAP_NORESERVE are not checked, and the default check is very weak, leading to the risk of getting a process "OOM-killed". Under Linux 2.4 any nonzero value implies mode 1. 
In mode 2 (available since Linux 2.6), the total virtual address space on the system is limited to (SS + RAM*(r/100)), where SS is the size of the swap space, and RAM is the size of the physical memory, and r is the contents of the file /proc/sys/vm/overcommit_ratio. --- topics/faq.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/topics/faq.md b/topics/faq.md index dfb42a0665..154c1cbf16 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -252,8 +252,12 @@ more optimistic allocation fashion, and this is indeed what you want for Redis. A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. +Beware, this article had 1 and 2 configurtation value for `overcommit_memory` +reversed: reffer to the ["proc(5)"][proc5] man page for the right meaning of the +available values. [redhatvm]: http://www.redhat.com/magazine/001nov04/features/vm/ +[proc5]: http://man7.org/linux/man-pages/man5/proc.5.html ## Are Redis on disk snapshots atomic? From a7a6c8751c6a798713a2d4e35c07fc1141c2c642 Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:23:26 +0200 Subject: [PATCH 0006/2314] Remove " araund proc(5) --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index 154c1cbf16..5ea626080a 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -253,7 +253,7 @@ A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. Beware, this article had 1 and 2 configurtation value for `overcommit_memory` -reversed: reffer to the ["proc(5)"][proc5] man page for the right meaning of the +reversed: reffer to the [proc(5)][proc5] man page for the right meaning of the available values. 
[redhatvm]: http://www.redhat.com/magazine/001nov04/features/vm/ From 0c9c73adbf7d10d3d6768df122dfdb5fe3f86bb3 Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:25:19 +0200 Subject: [PATCH 0007/2314] FIX: typos here and there - configurtation -> configuration - reffer -> refer --- topics/faq.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/faq.md b/topics/faq.md index 5ea626080a..c7294947ec 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -252,8 +252,8 @@ more optimistic allocation fashion, and this is indeed what you want for Redis. A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. -Beware, this article had 1 and 2 configurtation value for `overcommit_memory` -reversed: reffer to the [proc(5)][proc5] man page for the right meaning of the +Beware, this article had 1 and 2 configuration value for `overcommit_memory` +reversed: refer to the [proc(5)][proc5] man page for the right meaning of the available values. [redhatvm]: http://www.redhat.com/magazine/001nov04/features/vm/ From 9ebac39d49f576a4e14ff9a4ffe642fb48aa68ba Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:28:43 +0200 Subject: [PATCH 0008/2314] Format option values as code --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index c7294947ec..9e53d4bde0 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -252,7 +252,7 @@ more optimistic allocation fashion, and this is indeed what you want for Redis. A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. 
-Beware, this article had 1 and 2 configuration value for `overcommit_memory` +Beware, this article had `1` and `2` configuration values for `overcommit_memory` reversed: refer to the [proc(5)][proc5] man page for the right meaning of the available values. From f10af03633762961af88cecafd8e99b77e502270 Mon Sep 17 00:00:00 2001 From: Alexandre Curreli Date: Thu, 10 Oct 2013 11:41:30 -0400 Subject: [PATCH 0009/2314] Added scredis to Scala clients --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 327a74c93c..1c6ae47be7 100644 --- a/clients.json +++ b/clients.json @@ -669,5 +669,13 @@ "repository": "https://github.com/ctstone/csredis", "description": "Async (and sync) client for Redis and Sentinel", "authors": ["ctnstone"] + }, + + { + "name": "scredis", + "language": "Scala", + "repository": "https://github.com/Livestream/scredis", + "description": "Advanced async (and sync) client entirely written in Scala. 
Extensively used in production at http://www.livestream.com", + "authors": ["Livestream"] } ] From 1344563dcb4c539112da1c933978fc1c5ef9dcf4 Mon Sep 17 00:00:00 2001 From: Ruben Kerkhof Date: Mon, 24 Mar 2014 19:02:27 +0000 Subject: [PATCH 0010/2314] Fix a few typos --- topics/benchmarks.md | 4 ++-- topics/clients.md | 2 +- topics/cluster-tutorial.md | 4 ++-- topics/internals-vm.md | 2 +- topics/latency.md | 6 +++--- topics/persistence.md | 2 +- topics/releases.md | 2 +- topics/sentinel-clients.md | 2 +- topics/sentinel-spec.md | 4 ++-- topics/sentinel.md | 2 +- 10 files changed, 15 insertions(+), 15 deletions(-) diff --git a/topics/benchmarks.md b/topics/benchmarks.md index b645f4ea28..c2c5383b28 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -96,7 +96,7 @@ Using pipelining By default every client (the benchmark simulates 50 clients if not otherwise specified with `-c`) sends the next command only when the reply of the previous command is received, this means that the server will likely need a read call -in order to read each command from every client. Also RTT is payed as well. +in order to read each command from every client. Also RTT is paid as well. Redis supports [/topics/pipelining](pipelining), so it is possible to send multiple commands at once, a feature often exploited by real world applications. @@ -245,7 +245,7 @@ See the graph below. ![Data size impact](https://github.com/dspezia/redis-doc/raw/client_command/topics/Data_size.png) -+ On multi CPU sockets servers, Redis performance becomes dependant on the ++ On multi CPU sockets servers, Redis performance becomes dependent on the NUMA configuration and process location. The most visible effect is that redis-benchmark results seem non deterministic because client and server processes are distributed randomly on the cores. 
To get deterministic results, diff --git a/topics/clients.md b/topics/clients.md index eae7c189ec..99ae876de7 100644 --- a/topics/clients.md +++ b/topics/clients.md @@ -82,7 +82,7 @@ Output buffers limits --- Redis needs to handle a variable-length output buffer for every client, since -a command can produce a big amount of data that needs to be transfered to the +a command can produce a big amount of data that needs to be transferred to the client. However it is possible that a client sends more commands producing more output diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index d920b6eac4..4e99f5d111 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -294,7 +294,7 @@ changed the cluster layout by adding or removing nodes. Writing an example app with redis-rb-cluster --- -Before goign forward showing how to operate the Redis Cluster, doing things +Before going forward showing how to operate the Redis Cluster, doing things like a failover, or a resharding, we need to create some example application or at least to be able to understand the semantics of a simple Redis Cluster client interaction. @@ -726,7 +726,7 @@ use redis-trib again, but with the --slave option, like this: ./redis-trib.rb add-node --slave 127.0.0.1:7006 127.0.0.1:7000 Note that the command line here is exactly like the one we used to add -a new master, so we are not specifiying to which master we want to add +a new master, so we are not specifying to which master we want to add the replica. In this case what happens is that redis-trib will add the new node as replica of a random master among the masters with less replicas. diff --git a/topics/internals-vm.md b/topics/internals-vm.md index f4a157fc40..e689bccf39 100644 --- a/topics/internals-vm.md +++ b/topics/internals-vm.md @@ -189,7 +189,7 @@ Threaded VM There are basically three main ways to turn the blocking VM into a non blocking one. 
* 1: One way is obvious, and in my opionion, not a good idea at all, that is, turning Redis itself into a theaded server: if every request is served by a different thread automatically other clients don't need to wait for blocked ones. Redis is fast, exports atomic operations, has no locks, and is just 10k lines of code, *because* it is single threaded, so this was not an option for me. -* 2: Using non-blocking I/O against the swap file. After all you can think Redis already event-loop based, why don't just handle disk I/O in a non-blocking fashion? I also discarded this possiblity because of two main reasons. One is that non blocking file operations, unlike sockets, are an incompatibility nightmare. It's not just like calling select, you need to use OS-specific things. The other problem is that the I/O is just one part of the time consumed to handle VM, another big part is the CPU used in order to encode/decode data to/from the swap file. This is I picked option three, that is... +* 2: Using non-blocking I/O against the swap file. After all you can think Redis already event-loop based, why don't just handle disk I/O in a non-blocking fashion? I also discarded this possibility because of two main reasons. One is that non blocking file operations, unlike sockets, are an incompatibility nightmare. It's not just like calling select, you need to use OS-specific things. The other problem is that the I/O is just one part of the time consumed to handle VM, another big part is the CPU used in order to encode/decode data to/from the swap file. This is I picked option three, that is... * 3: Using I/O threads, that is, a pool of threads handling the swap I/O operations. This is what the Redis VM is using, so let's detail how this works. I/O Threads diff --git a/topics/latency.md b/topics/latency.md index 1ffa7770c5..f7f3f3da6d 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -124,7 +124,7 @@ serves all the client requests, using a technique called **multiplexing**. 
This means that Redis can serve a single request in every given moment, so all the requests are served sequentially. This is very similar to how Node.js works as well. However, both products are often not perceived as being slow. -This is caused in part by the small about of time to complete a single request, +This is caused in part by the small amount of time to complete a single request, but primarily because these products are designed to not block on system calls, such as reading data from or writing data to a socket. @@ -192,7 +192,7 @@ Fork time in different systems Modern hardware is pretty fast to copy the page table, but Xen is not. The problem with Xen is not virtualization-specific, but Xen-specific. For instance -using VMware or Virutal Box does not result into slow fork time. +using VMware or Virtual Box does not result into slow fork time. The following is a table that compares fork time for different Redis instance size. Data is obtained performing a BGSAVE and looking at the `latest_fork_usec` filed in the `INFO` command output. @@ -511,7 +511,7 @@ designed to track those latency problems that for one reason or the other esacped an analysis using normal tools. The software watchdog is an experimental feature. While it is designed to -be used in production enviroments care should be taken to backup the database +be used in production environments care should be taken to backup the database before proceeding as it could possibly have unexpected interactions with the normal execution of the Redis server. diff --git a/topics/persistence.md b/topics/persistence.md index 42cd83ee55..26ee650f65 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -17,7 +17,7 @@ RDB advantages --- * RDB is a very compact single-file point-in-time representation of your Redis data. RDB files are perfect for backups. For instance you may want to archive your RDB files every hour for the latest 24 hours, and to save an RDB snapshot every day for 30 days. 
This allows you to easily restore different versions of the data set in case of disasters. -* RDB is very good for disaster recovery, being a single compact file can be transfered to far data centers, or on Amazon S3 (possibly encrypted). +* RDB is very good for disaster recovery, being a single compact file can be transferred to far data centers, or on Amazon S3 (possibly encrypted). * RDB maximizes Redis performances since the only work the Redis parent process needs to do in order to persist is forking a child that will do all the rest. The parent instance will never perform disk I/O or alike. * RDB allows faster restarts with big datasets compared to AOF. diff --git a/topics/releases.md b/topics/releases.md index 25c1d9535b..dabd8f0e64 100644 --- a/topics/releases.md +++ b/topics/releases.md @@ -43,7 +43,7 @@ was forked into the `2.8` branch. This new branch can be at three different levels of stability: development, frozen, and release candidate. -* Development: new features and bug fixes are commited into the branch, but not everything going into `unstable` is merged here. Only the features that can become stable in a reasonable timeframe are merged. +* Development: new features and bug fixes are committed into the branch, but not everything going into `unstable` is merged here. Only the features that can become stable in a reasonable timeframe are merged. * Frozen: no new feature is added, unless it is almost guaranteed to have zero stability impacts on the source code, and at the same time for some reason it is a very important feature that must be shipped ASAP. Big code changes are only allowed when they are needed in order to fix bugs. * Release Candidate: only fixes are committed against this release. 
diff --git a/topics/sentinel-clients.md b/topics/sentinel-clients.md index a0cb8a885c..64b2beaf85 100644 --- a/topics/sentinel-clients.md +++ b/topics/sentinel-clients.md @@ -4,7 +4,7 @@ Guidelines for Redis clients with support for Redis Sentinel === Redis Sentinel is a monitoring solution for Redis instances that handles different aspects of monitoring, including notification of events, automatic failover. -Sentinel can also play the role of configuration source for Redis clients. This document is targetted at Redis clients developers that want to support Sentinel in their clients implementation with the following goals: +Sentinel can also play the role of configuration source for Redis clients. This document is targeted at Redis clients developers that want to support Sentinel in their clients implementation with the following goals: * Automatic configuration of clients via Sentinel. * Improved reliability of Redis Sentinel automatic fail over, because of Sentinel-aware clients that will automatically reconnect to the new master. diff --git a/topics/sentinel-spec.md b/topics/sentinel-spec.md index a1d33efb7f..ee5bcd13f5 100644 --- a/topics/sentinel-spec.md +++ b/topics/sentinel-spec.md @@ -20,7 +20,7 @@ a way to perform automatic fail over when a master instance is not functioning correctly. The plan is to provide an usable beta implementaiton of Redis Sentinel in a -short time, preferrably in mid July 2012. +short time, preferably in mid July 2012. In short this is what Redis Sentinel will be able to do: @@ -215,7 +215,7 @@ decided. Also Sentinels can be configured in two ways: only as monitors that can't perform the fail over, or as Sentinels that can start the failover. -What is desireable is that only a Sentinel will start the failover process, +What is desirable is that only a Sentinel will start the failover process, and this Sentinel should be selected among the Sentinels that are allowed to perform the failover. 
diff --git a/topics/sentinel.md b/topics/sentinel.md index 51bf73ece1..8757d2b27d 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -32,7 +32,7 @@ Obtaining Sentinel Sentinel is currently developed in the *unstable* branch of the Redis source code at Github. However an update copy of Sentinel is provided with every patch release of Redis 2.8. -The simplest way to use Sentinel is to download the latest verison of Redis 2.8 or to compile Redis latest commit in the *unstable* branch at Github. +The simplest way to use Sentinel is to download the latest version of Redis 2.8 or to compile Redis latest commit in the *unstable* branch at Github. IMPORTANT: **Even if you are using Redis 2.6, you should use Sentinel shipped with Redis 2.8**. From 421c93a42f56461c79f343704627f0e56aeac6c7 Mon Sep 17 00:00:00 2001 From: Ruben Kerkhof Date: Mon, 24 Mar 2014 19:05:47 +0000 Subject: [PATCH 0011/2314] Fix one more typo --- topics/internals-vm.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/internals-vm.md b/topics/internals-vm.md index e689bccf39..110b5733d3 100644 --- a/topics/internals-vm.md +++ b/topics/internals-vm.md @@ -188,7 +188,7 @@ Threaded VM --- There are basically three main ways to turn the blocking VM into a non blocking one. -* 1: One way is obvious, and in my opionion, not a good idea at all, that is, turning Redis itself into a theaded server: if every request is served by a different thread automatically other clients don't need to wait for blocked ones. Redis is fast, exports atomic operations, has no locks, and is just 10k lines of code, *because* it is single threaded, so this was not an option for me. +* 1: One way is obvious, and in my opinion, not a good idea at all, that is, turning Redis itself into a theaded server: if every request is served by a different thread automatically other clients don't need to wait for blocked ones. 
Redis is fast, exports atomic operations, has no locks, and is just 10k lines of code, *because* it is single threaded, so this was not an option for me. * 2: Using non-blocking I/O against the swap file. After all you can think Redis already event-loop based, why don't just handle disk I/O in a non-blocking fashion? I also discarded this possibility because of two main reasons. One is that non blocking file operations, unlike sockets, are an incompatibility nightmare. It's not just like calling select, you need to use OS-specific things. The other problem is that the I/O is just one part of the time consumed to handle VM, another big part is the CPU used in order to encode/decode data to/from the swap file. This is I picked option three, that is... * 3: Using I/O threads, that is, a pool of threads handling the swap I/O operations. This is what the Redis VM is using, so let's detail how this works. From 269afe15ebf9d50405c55ce1ac06d877b5a58495 Mon Sep 17 00:00:00 2001 From: Paul Espinosa Date: Mon, 24 Mar 2014 21:50:24 -0700 Subject: [PATCH 0012/2314] Fixed some typos in the BITPOS command's documentation --- commands/bitpos.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/commands/bitpos.md b/commands/bitpos.md index c7cf1c1344..715fc1f726 100644 --- a/commands/bitpos.md +++ b/commands/bitpos.md @@ -1,13 +1,13 @@ Return the position of the first bit set to 1 or 0 in a string. -The position is returned thinking at the string as an array of bits from left to -right where the first byte most significant bit is at position 0, the second -byte most significant big is at position 8 and so forth. +The position is returned, thinking of the string as an array of bits from left to +right, where the first byte's most significant bit is at position 0, the second +byte's most significant bit is at position 8, and so forth. The same bit position convention is followed by `GETBIT` and `SETBIT`. 
-By default all the bytes contained in the string are examined. -It is possible to look for bits only in a specified interval passing the additional arguments _start_ and _end_ (it is possible to just pass _start_, the operation will assume that the end if the last byte of the string. However there are semantical differences as explained later). The range is interpreted as a range of bytes and not a range of bits, so `start=0` and `end=2` means to look at the first three bytes. +By default, all the bytes contained in the string are examined. +It is possible to look for bits only in a specified interval passing the additional arguments _start_ and _end_ (it is possible to just pass _start_, the operation will assume that the end is the last byte of the string. However there are semantical differences as explained later). The range is interpreted as a range of bytes and not a range of bits, so `start=0` and `end=2` means to look at the first three bytes. Note that bit positions are returned always as absolute values starting from bit zero even when _start_ and _end_ are used to specify a range. @@ -25,11 +25,11 @@ The command returns the position of the first bit set to 1 or 0 according to the If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. -If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns the first bit not part of the string on the right. So if the string is tree bytes set to the value 0xff the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. +If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value 0xff the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. 
-Basically the function consider the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**. +Basically, the function considers the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**. -However this behavior changes if you are looking for clear bits and specify a range with both __start__ and __end__. If no clear bit is found in the specified range, the function returns -1 as the user specified a clear range and there are no 0 bits in that range. +However, this behavior changes if you are looking for clear bits and specify a range with both __start__ and __end__. If no clear bit is found in the specified range, the function returns -1 as the user specified a clear range and there are no 0 bits in that range. @examples From a64168e4a5d8081291cef99a1ec45367f23525b4 Mon Sep 17 00:00:00 2001 From: Mani Tadayon Date: Tue, 1 Apr 2014 01:49:19 -0700 Subject: [PATCH 0013/2314] Fix typos and clarify grammar in latency docs --- topics/latency.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency.md b/topics/latency.md index 1ffa7770c5..6852b0eca0 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -497,7 +497,7 @@ Given that `REDIS_EXPIRELOOKUPS_PER_CRON` is set to 10 by default, and the proce However the algorithm is adaptive and will loop if it founds more than 25% of keys already expired in the set of sampled keys. But given that we run the algorithm ten times per second, this means that the unlucky event of more than 25% of the keys in our random sample are expiring at least *in the same second*. -Basically this means that **if the database contains has many many keys expiring in the same second, and this keys are at least 25% of the current population of keys with an expire set**, Redis can block in order to reach back a percentage of keys already expired that is less than 25%. 
+Basically this means that **if the database has many many keys expiring in the same second, and these make up at least 25% of the current population of keys with an expire set**, Redis can block in order to get the percentage of keys already expired below 25%. This approach is needed in order to avoid using too much memory for keys that area already expired, and usually is absolutely harmless since it's strange that a big number of keys are going to expire in the same exact second, but it is not impossible that the user used `EXPIREAT` extensively with the same Unix time. From 065b9aa6415d89bee010d05e93fd7b230e2160de Mon Sep 17 00:00:00 2001 From: Rob Blanckaert Date: Wed, 9 Apr 2014 16:32:01 -0400 Subject: [PATCH 0014/2314] Very small typos fix in latency.md --- topics/latency.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency.md b/topics/latency.md index 1ffa7770c5..e32422f132 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -519,7 +519,7 @@ It is important to use it only as *last resort* when there is no way to track th This is how this feature works: -* The user enables the softare watchdog using te `CONFIG SET` command. +* The user enables the softare watchdog using the `CONFIG SET` command. * Redis starts monitoring itself constantly. * If Redis detects that the server is blocked into some operation that is not returning fast enough, and that may be the source of the latency issue, a low level report about where the server is blocked is dumped on the log file. * The user contacts the developers writing a message in the Redis Google Group, including the watchdog report in the message. 
From e182610c7a55dcb700a7d9ea48615190e4cb9892 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 10 Apr 2014 21:36:07 +0200 Subject: [PATCH 0015/2314] Add clarity to "partial synchronization" Fixes #341 --- topics/replication.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/topics/replication.md b/topics/replication.md index 9e21be54a7..a43b23ac69 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -78,7 +78,8 @@ reconnect and ask the master to continue the replication. Assuming the master run id is still the same, and that the offset specified is available in the replication backlog, replication will resume from the point where it left off. If either of these conditions are unmet, a full resynchronization is performed -(which is the normal pre-2.8 behavior). +(which is the normal pre-2.8 behavior). As the run id of the connected master is not +persisted to disk, a full resynchronization is needed when the slave restarts. The new partial resynchronization feature uses the `PSYNC` command internally, while the old implementation uses the `SYNC` command. Note that a Redis 2.8 From bcc832cceb32a4b7559f3e72f9fe8eaabc7eb0c5 Mon Sep 17 00:00:00 2001 From: Julien Huang Date: Wed, 23 Apr 2014 15:14:09 +0200 Subject: [PATCH 0016/2314] Fix typo for EVAL command --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 0fe933bee1..e7460de7f9 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -284,7 +284,7 @@ that if a script was sent once it is still in memory, so EVALSHA can be used against those scripts in a pipeline without the chance of an error being generated due to an unknown script (we'll see this problem in detail later). 
-A common patter is to call `SCRIPT LOAD` to load all the scripts that will +A common pattern is to call `SCRIPT LOAD` to load all the scripts that will appear in a pipeline, then use `EVALSHA` directly inside the pipeline without any need to check for errors resulting from the script hash not being recognized. From 734e70dc5f8bb5e7ffa3410c2715ca4441598e79 Mon Sep 17 00:00:00 2001 From: Jonathan del Strother Date: Fri, 25 Apr 2014 15:16:29 +0100 Subject: [PATCH 0017/2314] slave-read-only must be disabled on hot-swapping --- topics/admin.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/admin.md b/topics/admin.md index 1baa94adf9..55cc35a093 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -32,5 +32,6 @@ The following steps provide a very commonly used way in order to avoid any downt * If you use a single server, make sure that the slave is started in a different port than the master instance, otherwise the slave will not be able to start at all. * Wait for the replication initial synchronization to complete (check the slave log file). * Make sure using INFO that there are the same number of keys in the master and in the slave. Check with redis-cli that the slave is working as you wish and is replying to your commands. +* Allow writes to the slave using **CONFIG SET slave-read-only no** * Configure all your clients in order to use the new instance (that is, the slave). * Once you are sure that the master is no longer receiving any query (you can check this with the [MONITOR command](/commands/monitor)), elect the slave to master using the **SLAVEOF NO ONE** command, and shut down your master. From 5d82cc964e81aa6becd3f54e2aa32e34aacf930f Mon Sep 17 00:00:00 2001 From: Christine Spang Date: Fri, 25 Apr 2014 11:59:51 -0700 Subject: [PATCH 0018/2314] s/guys/people/ Sometimes the important people aren't guys. 
:) --- topics/persistence.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/persistence.md b/topics/persistence.md index 42cd83ee55..f9f41a071f 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -269,7 +269,7 @@ Since many Redis users are in the startup scene and thus don't have plenty of money to spend we'll review the most interesting disaster recovery techniques that don't have too high costs. -* Amazon S3 and other similar services are a good way for mounting your disaster recovery system. Simply transfer your daily or hourly RDB snapshot to S3 in an encrypted form. You can encrypt your data using `gpg -c` (in symmetric encryption mode). Make sure to store your password in many different safe places (for instance give a copy to the most important guys of your organization). It is recommanded to use multiple storage services for improved data safety. +* Amazon S3 and other similar services are a good way for mounting your disaster recovery system. Simply transfer your daily or hourly RDB snapshot to S3 in an encrypted form. You can encrypt your data using `gpg -c` (in symmetric encryption mode). Make sure to store your password in many different safe places (for instance give a copy to the most important people of your organization). It is recommended to use multiple storage services for improved data safety. * Transfer your snapshots using SCP (part of SSH) to far servers. This is a fairly simple and safe route: get a small VPS in a place that is very far from you, install ssh there, and greate an ssh client key without passphrase, then make add it in the authorized_keys file of your small VPS. You are ready to transfer backups in an automated fashion. 
Get at least two VPS in two different providers From 0173d72c64a2b7636cb0314f16786ba92056111d Mon Sep 17 00:00:00 2001 From: Marc Gravell Date: Mon, 28 Apr 2014 10:03:39 +0100 Subject: [PATCH 0019/2314] tutorial: list SE.Redis as a cluster client --- topics/cluster-tutorial.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index d920b6eac4..961a35c91e 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -261,6 +261,7 @@ I'm aware of the following implementations: * [redis-py-cluster](https://github.com/Grokzen/redis-py-cluster) appears to be a port of redis-rb-cluster to Python. Not recently updated (last commit 6 months ago) however it may be a starting point. * The popular [Predis](https://github.com/nrk/predis) has support for Redis Cluster, the support was recently updated and is in active development. * The most used Java client, [Jedis](https://github.com/xetorthio/jedis) recently added support for Redis Cluster, see the *Jedis Cluster* section in the project README. +* [StackExchange.Redis](https://github.com/StackExchange/StackExchange.Redis) offers support for C# (and should work fine with most .NET languages; VB, F#, etc) * The `redis-cli` utility in the unstable branch of the Redis repository at Github implements a very basic cluster support when started with the `-c` switch. 
An easy way to test Redis Cluster is either to try and of the above clients From 78ca6be9f5a2f16e1fef5178b75927a06a07810e Mon Sep 17 00:00:00 2001 From: Craig Barnes Date: Wed, 30 Apr 2014 15:23:40 +0100 Subject: [PATCH 0020/2314] Replace non-portable "echo -en" command with "printf" --- topics/pipelining.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/pipelining.md b/topics/pipelining.md index 5be27ac5e6..c35476341e 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -36,7 +36,7 @@ This is called pipelining, and is a technique widely in use since many decades. Redis supports pipelining since the very early days, so whatever version you are running, you can use pipelining with Redis. This is an example using the raw netcat utility: - $ (echo -en "PING\r\nPING\r\nPING\r\n"; sleep 1) | nc localhost 6379 + $ (printf "PING\r\nPING\r\nPING\r\n"; sleep 1) | nc localhost 6379 +PONG +PONG +PONG From ae411d22a757df2dc5ae95689486359db46b00be Mon Sep 17 00:00:00 2001 From: Pietro Cerutti Date: Thu, 8 May 2014 12:16:46 +0200 Subject: [PATCH 0021/2314] CONFIG GET return type is an Array, not a Bulk String --- commands/config get.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/config get.md b/commands/config get.md index 0244c364c1..e527089955 100644 --- a/commands/config get.md +++ b/commands/config get.md @@ -50,4 +50,4 @@ reported by `CONFIG GET` as "900 1 300 10". @return -The return type of the command is a @bulk-string-reply. +The return type of the command is a @array-reply. 
From 7a10b836d473038bfe4734b68d03a4424cec9d63 Mon Sep 17 00:00:00 2001 From: Roberto Dip Date: Sat, 10 May 2014 20:12:39 -0400 Subject: [PATCH 0022/2314] Quickstart guide: corrected the path of the redis-server and redis-cli files in the install redis instructions --- topics/quickstart.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/quickstart.md b/topics/quickstart.md index c0dc6f80b3..4bfed9ffcd 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -37,8 +37,8 @@ At this point you can try if your build works correctly typing **make test**, bu It is a good idea to copy both the Redis server than the command line interface in proper places using the following commands: -* sudo cp redis-server /usr/local/bin/ -* sudo cp redis-cli /usr/local/bin/ +* sudo cp src/redis-server /usr/local/bin/ +* sudo cp src/redis-cli /usr/local/bin/ In the following documentation I assume that /usr/local/bin is in your PATH environment variable so you can execute both the binaries without specifying the full path. From 5806d58cc1bd1229415e04e368f072b177533ec3 Mon Sep 17 00:00:00 2001 From: Dov Murik Date: Thu, 29 May 2014 23:05:51 -0400 Subject: [PATCH 0023/2314] Fix struct example in eval page Removed stray output line from example of struct.pack Lua function. --- commands/eval.md | 1 - 1 file changed, 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index fcaffb58b1..b6420ee93a 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -536,7 +536,6 @@ Example: ``` 127.0.0.1:6379> eval 'return struct.pack("HH", 1, 2)' 0 "\x01\x00\x02\x00" -3) (integer) 5 127.0.0.1:6379> eval 'return {struct.unpack("HH", ARGV[1])}' 0 "\x01\x00\x02\x00" 1) (integer) 1 2) (integer) 2 From 0b343b60634068189bffd12ddd3e95699378e07e Mon Sep 17 00:00:00 2001 From: "Haris Ibrahim K. V." 
Date: Sun, 15 Jun 2014 10:22:52 +0530 Subject: [PATCH 0024/2314] Update pipelining.md --- topics/pipelining.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/pipelining.md b/topics/pipelining.md index 5be27ac5e6..f2ecd13f80 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -25,7 +25,7 @@ This time is called RTT (Round Trip Time). It is very easy to see how this can a If the interface used is a loopback interface, the RTT is much shorter (for instance my host reports 0,044 milliseconds pinging 127.0.0.1), but it is still a lot if you need to perform many writes in a row. -Fortunately there is a way to improve this use cases. +Fortunately there is a way to improve this use case. Redis Pipelining --- From a54dcf34b19770a48b6640428b59f5fcc6390ced Mon Sep 17 00:00:00 2001 From: "Haris Ibrahim K. V." Date: Sun, 15 Jun 2014 10:32:13 +0530 Subject: [PATCH 0025/2314] Improves the documentation for better clarity. --- topics/pipelining.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/topics/pipelining.md b/topics/pipelining.md index f2ecd13f80..843f246641 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -54,7 +54,7 @@ To be very explicit, with pipelining the order of operations of our very first e * *Server:* 3 * *Server:* 4 -**IMPORTANT NOTE**: while the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send many many commands with pipelining it's better to send this commands up to a given reasonable number, for instance 10k commands, read the replies, and send again other 10k commands and so forth. The speed will be nearly the same, but the additional memory used will be at max the amount needed to queue the replies for this 10k commands. +**IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. 
So if you need to send a lot of commands with pipelining, it is better to send them as batches having a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at max the amount needed to queue the replies for this 10k commands. Some benchmark --- @@ -93,16 +93,16 @@ In the following benchmark we'll use the Redis Ruby client, supporting pipelinin with_pipelining } -Running the above simple script will provide this figures in my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: +Running the above simple script will provide the following figures in my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: without pipelining 1.185238 seconds with pipelining 0.250783 seconds -As you can see using pipelining we improved the transfer by a factor of five. +As you can see, using pipelining, we improved the transfer by a factor of five. Pipelining VS Scripting --- -Using [Redis scripting](/commands/eval) (available in Redis version 2.6 or greater) a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed server side. A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). +Using [Redis scripting](/commands/eval) (available in Redis version 2.6 or greater) a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. 
A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). Sometimes the application may also want to send `EVAL` or `EVALSHA` commands in a pipeline. This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](http://redis.io/commands/script-load) command (it guarantees that `EVALSHA` can be called without the risk of failing). From a26e01486ded3426308c9bd622c6918b4644c717 Mon Sep 17 00:00:00 2001 From: baywet Date: Mon, 16 Jun 2014 14:36:49 -0400 Subject: [PATCH 0026/2314] Adding a note a the beginning saying that version 3.0 is required because i spent 3 days trying to make 2.8 work like a cluster --- topics/cluster-tutorial.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index d920b6eac4..4bd3ade977 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -8,6 +8,8 @@ going into the details that are covered in the [Redis Cluster specification](/topics/cluster-spec) but just describing how the system behaves from the point of view of the user. +Note this tutorial requires Redis version 3.0 or higher. + Note that if you plan to run a serious Redis Cluster deployment, the more formal specification is an highly suggested reading. From 0923aba5a438bb7b12ebbd31205960d8b86ebb39 Mon Sep 17 00:00:00 2001 From: ChuntaoLu Date: Fri, 20 Jun 2014 18:38:32 -0400 Subject: [PATCH 0027/2314] Fix typo --- topics/data-types-intro.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 04d1fef8fe..0ae55d9f6a 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -684,8 +684,8 @@ the ability to return both repeating and non-repeating elements. 
Redis Sorted sets --- -Sorted sets are a data type which is similar to a mix between asSet and -an hash. Like sets, sorted sets are composed of unique, non-repeating +Sorted sets are a data type which is similar to a mix between a Set and +a hash. Like sets, sorted sets are composed of unique, non-repeating string elements, so in some sense a sorted set is a set as well. However while elements inside sets are not ordered, every element in From 5840db0f68afcd14233ef23a23196cc976618514 Mon Sep 17 00:00:00 2001 From: Marc Gravell Date: Mon, 23 Jun 2014 17:05:56 +0100 Subject: [PATCH 0028/2314] CLIENT KILL: minor typo --- commands/client kill.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/client kill.md b/commands/client kill.md index 77702f38ca..ec3ccd94c9 100644 --- a/commands/client kill.md +++ b/commands/client kill.md @@ -17,7 +17,7 @@ instead of killing just by address. The following filters are available: * `CLIENT KILL TYPE type`, where *type* is one of `normal`, `slave`, `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. * `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command. -It is possible to provide multiple filters at the same time. The command will ahdnle multiple filters via logical AND. For example: +It is possible to provide multiple filters at the same time. The command will handle multiple filters via logical AND. For example: CLIENT KILL addr 127.0.0.1:6379 type slave From ad29fdf2e5eae1e09d9accc5823526eff2f89c2c Mon Sep 17 00:00:00 2001 From: Pietro Cerutti Date: Tue, 24 Jun 2014 16:24:47 +0200 Subject: [PATCH 0029/2314] Add Retcl, a Redis client library written in Tcl. 
--- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index b0e4ed5839..034c6e9cf5 100644 --- a/clients.json +++ b/clients.json @@ -486,6 +486,14 @@ "authors": ["antirez"] }, + { + "name": "Retcl", + "language": "Tcl", + "repository": "https://github.com/gahr/retcl", + "description": "Retcl is an asynchronous, event-driven Redis client library implemented as a single-file Tcl module.", + "authors": ["gahrgahr"] + }, + { "name": "ServiceStack.Redis", "language": "C#", From ad024e631b7e7e83a440bd9febb72defc453758f Mon Sep 17 00:00:00 2001 From: "Tam. Nguyen Duc" Date: Wed, 2 Jul 2014 17:26:00 +0700 Subject: [PATCH 0030/2314] Add gore to client.json Add gore (https://github.com/keimoon/gore) to client list Gore is a full feature Redis client for Go that supports Pipeline, Transaction, LUA scripting, Pubsub, Connection Pool, Sentinel and client sharding --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index a0fe5ebbc4..3dc8621a64 100644 --- a/clients.json +++ b/clients.json @@ -156,6 +156,15 @@ "active": true }, + { + "name": "gore", + "language": "Go", + "repository": "https://github.com/keimoon/gore", + "description": "A full feature redis Client for Go. 
Supports Pipeline, Transaction, LUA scripting, Pubsub, Connection Pool, Sentinel and client sharding", + "authors": ["keimoon"], + "active": true + }, + { "name": "hedis", "language": "Haskell", From 3ba1c41b3701c89627ffa5c681469e62ddf61ef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Descotte?= Date: Fri, 25 Jul 2014 08:35:44 +0200 Subject: [PATCH 0031/2314] Add rediscala, a non-blocking I/O scala driver A non-blocking I/O scala driver is very useful for async frameworks (like Play) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 48243f11f7..07b2974c85 100644 --- a/clients.json +++ b/clients.json @@ -486,6 +486,15 @@ "authors": ["livestream"], "active": true }, + + { + "name": "rediscala", + "language": "Scala", + "repository": "https://github.com/etaty/rediscala", + "description": "A Redis client for Scala (2.10+) and (AKKA 2.2+) with non-blocking and asynchronous I/O operations.", + "authors": ["etaty"], + "active": true + }, { "name": "Tcl Client", From fc17f2d83eed99b4d16f3e434db42913175776af Mon Sep 17 00:00:00 2001 From: Frank Mueller Date: Mon, 28 Jul 2014 12:07:56 +0200 Subject: [PATCH 0032/2314] Changed repository of Tideland Go client The Tideland Go software moved to GitHub. So changed the repository here. 
--- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 48243f11f7..1afc120bd0 100644 --- a/clients.json +++ b/clients.json @@ -122,7 +122,7 @@ { "name": "Tideland Go Redis Client", "language": "Go", - "repository": "http://git.tideland.biz/godm/redis", + "repository": "https://github.com/tideland/godm", "description": "A flexible Go Redis client able to handle all commands", "authors": ["themue"], "active": true From 5aa082ad96f035c0990f80c0aa2be784ff090823 Mon Sep 17 00:00:00 2001 From: Alexandre Curreli Date: Mon, 28 Jul 2014 16:52:03 +0200 Subject: [PATCH 0033/2314] Updated description of scredis to reflect latest changes --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 48243f11f7..6162d8d9eb 100644 --- a/clients.json +++ b/clients.json @@ -482,7 +482,7 @@ "name": "scredis", "language": "Scala", "repository": "https://github.com/Livestream/scredis", - "description": "Scredis is an advanced Redis client entirely written in Scala. Used in production at http://Livestream.com.", + "description": "Non-blocking, ultra-fast Scala Redis client built on top of Akka IO, used in production at Livestream", "authors": ["livestream"], "active": true }, From cbdcbf9bb0b380b90bdc113be5c5fc973b7f2517 Mon Sep 17 00:00:00 2001 From: Kyle Simpson Date: Sat, 2 Aug 2014 00:07:47 -0500 Subject: [PATCH 0034/2314] Fixing reference of "MULTI" to "EXEC", per antirez/redis-doc#396 Closes https://github.com/antirez/redis-doc/issues/396 --- topics/transactions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/transactions.md b/topics/transactions.md index a142794406..fc9df00b78 100644 --- a/topics/transactions.md +++ b/topics/transactions.md @@ -53,7 +53,7 @@ The following example increments keys `foo` and `bar` atomically. 
1) (integer) 1 2) (integer) 1 -As it is possible to see from the session above, `MULTI` returns an +As it is possible to see from the session above, `EXEC` returns an array of replies, where every element is the reply of a single command in the transaction, in the same order the commands were issued. From 231a8083f7cd462a650d5bb65c93d0a76ee84b8d Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 5 Aug 2014 08:52:54 +0200 Subject: [PATCH 0035/2314] Documented ZREVRANGEBYLEX Closes #398 --- commands.json | 26 ++++++++++++++++++++++++++ commands/zrevrangebylex.md | 16 ++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 commands/zrevrangebylex.md diff --git a/commands.json b/commands.json index a0ec252e77..8b96f1b83c 100644 --- a/commands.json +++ b/commands.json @@ -2213,6 +2213,32 @@ "since": "2.8.9", "group": "sorted_set" }, + "ZREVRANGEBYLEX": { + "summary": "Return a range of members in a sorted set, by lexicographical range, ordered from higher to lower strings.", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "max", + "type": "string" + }, + { + "name": "min", + "type": "string" + }, + { + "command": "LIMIT", + "name": ["offset", "count"], + "type": ["integer", "integer"], + "optional": true + } + ], + "since": "2.9.9", + "group": "sorted_set" + }, "ZRANGEBYSCORE": { "summary": "Return a range of members in a sorted set, by score", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", diff --git a/commands/zrevrangebylex.md b/commands/zrevrangebylex.md new file mode 100644 index 0000000000..c6772c9128 --- /dev/null +++ b/commands/zrevrangebylex.md @@ -0,0 +1,16 @@ +When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at `key` with a value between `max` and `min`. + +Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to `ZRANGEBYLEX`. + +@return + +@array-reply: list of elements in the specified score range. + +@examples + +```cli +ZADD myzset 0 a 0 b 0 c 0 d 0 e 0 f 0 g +ZREVRANGEBYLEX myzset [c - +ZREVRANGEBYLEX myzset (c - +ZREVRANGEBYLEX myzset (g [aaa +``` From 1752bd7b9b3089bcda14f3bfda34250370622339 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 5 Aug 2014 09:31:15 +0200 Subject: [PATCH 0036/2314] bitcount has optional, but not multiple arguments --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index a0ec252e77..a6cd168510 100644 --- a/commands.json +++ b/commands.json @@ -47,7 +47,7 @@ { "name": ["start", "end"], "type": ["integer", "integer"], - "multiple": true + "optional": true } ], "since": "2.6.0", From 67ca8fb17a67f13ee7dfeb1b10886d3fce2d0d13 Mon Sep 17 00:00:00 2001 From: Aaron1011 Date: Wed, 6 Aug 2014 15:01:16 -0400 Subject: [PATCH 0037/2314] Fix typo in spelling of 'or' --- commands/pubsub.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/pubsub.md b/commands/pubsub.md index ae05fa34cb..f766b00b0d 100644 --- a/commands/pubsub.md +++ b/commands/pubsub.md @@ -7,7 +7,7 @@ documented separately. The general form is: # PUBSUB CHANNELS [pattern] Lists the currently *active channels*. An active channel is a Pub/Sub channel -with one ore more subscribers (not including clients subscribed to patterns). 
+with one or more subscribers (not including clients subscribed to patterns). If no `pattern` is specified, all the channels are listed, otherwise if pattern is specified only channels matching the specified glob-style pattern are From 16f29da06547ba1c74e4ca4760d886256b63f0d6 Mon Sep 17 00:00:00 2001 From: Keegan Lowenstein Date: Thu, 7 Aug 2014 15:26:17 -0700 Subject: [PATCH 0038/2314] Fix typo in sentinel-clients.md --- topics/sentinel-clients.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel-clients.md b/topics/sentinel-clients.md index 84771c4e01..8c5a2339b3 100644 --- a/topics/sentinel-clients.md +++ b/topics/sentinel-clients.md @@ -74,7 +74,7 @@ If the instance is not a master as expected, the client should wait a short amou Handling reconnections === -Once the service name is resoled into the master address and a connection is established with the Redis master instance, every time a reconnection is needed, the client should resolve again the address using Sentinels restarting from Step 1. For instance Sentinel should contacted again the following cases: +Once the service name is resolved into the master address and a connection is established with the Redis master instance, every time a reconnection is needed, the client should resolve again the address using Sentinels restarting from Step 1. For instance Sentinel should contacted again the following cases: * If the client reconnects after a timeout or socket error. * If the client reconnects because it was explicitly closed or reconnected by the user. 
From 8bc4242335ca5e56b41f13e22dc278ecf588e87b Mon Sep 17 00:00:00 2001 From: Alex Date: Sat, 9 Aug 2014 13:33:22 +0400 Subject: [PATCH 0039/2314] typos + formatting --- topics/data-types-intro.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index a5e66788f9..567d393061 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -17,7 +17,7 @@ by Redis, which will be covered separately in this tutorial: * Hashes, which are maps composed of fields associated with values. Both the field and the value are strings. This are very similary to Ruby or Python hashes. -* Bit arrays (or simply bitmaps): it is possible, usign special commands, to +* Bit arrays (or simply bitmaps): it is possible, using special commands, to handle String values like array of bits: you can set and clear individual bits, count all the bits set to 1, find the first set or unset bit, and so forth. @@ -444,7 +444,7 @@ suggest you to read the following pages: * It is possible to build safer queues or rotating queues using `RPOPLPUSH`. * There is also a blocking variant of the command, called `BRPOPLPUSH`. -Automatically creation and removal of keys +Automatic creation and removal of keys --- So far in our examples we never had to create empty lists before pushing @@ -459,7 +459,7 @@ Hashes. Basically we can summarize the behavior with three rules: -1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is crated before adding the element. +1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is created before adding the element. 2. When we remove elements from an aggregate data type, if the value remains empty, the key is automatically destroyed. 3. 
Calling a read-only command such as `LLEN` (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. @@ -918,7 +918,7 @@ One of the biggest advantages of bitmaps is that they are sometimes an extremely space saving way to store informations. For example in a system where different users are represented by incremental user IDs, it is possible to remember a single bit information (for example if they want to receive -or no the newsletter) of 4 million of users using just 512 MB of memory. +or no the newsletter) of 4 billion of users using just 512 MB of memory. Bits are set and retrieved using the `SETBIT` and `GETBIT` commands: @@ -1002,7 +1002,7 @@ to un-serialize it back to the server. Conceptually the HLL API is like using Sets to do the same task. You would `SADD` every observed element into a set, and would use `SCARD` to check the -number of elements inside the set, which are unique since `SCARD` will not +number of elements inside the set, which are unique since `SADD` will not re-add an already added element. While you don't really *add items* into an HLL, because the data structure @@ -1012,6 +1012,7 @@ same: * Every time you see a new element, you add it to the count with `PFADD`. * Every time you want to retrieve the current approximation of the unique elements *added* with `PFADD` so far, you use the `PFCOUNT`. + > pfadd hll a b c d (integer) 1 > pfcount hll From 644f775cd894072c161a3378e97fb2eaa9078899 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Wed, 13 Aug 2014 13:16:29 +0900 Subject: [PATCH 0040/2314] Fix typo in config.md. 
--- topics/config.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/config.md b/topics/config.md index 525e153f02..91776614b1 100644 --- a/topics/config.md +++ b/topics/config.md @@ -34,7 +34,7 @@ Passing arguments via the command line Since Redis 2.6 it is possible to also pass Redis configuration parameters using the command line directly. This is very useful for testing purposes. -The following is an example that stats a new Redis instance using port 6380 +The following is an example that starts a new Redis instance using port 6380 as a slave of the instance running at 127.0.0.1 port 6379. ./redis-server --port 6380 --slaveof 127.0.0.1 6379 From 6849b1aa4dfe27cfa40bc2e03a7dc7c663085df9 Mon Sep 17 00:00:00 2001 From: jialechan Date: Mon, 18 Aug 2014 10:57:49 +0800 Subject: [PATCH 0041/2314] Improved examples code as example BITPOS mykey 1 0 BITPOS mykey 1 1 has same result (integer) 8, it's maybe better to change "BITPOS mykey 1 1" to "BITPOS mykey 1 2" will have clearly explain what the START param use for. 
--- commands/bitpos.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/bitpos.md b/commands/bitpos.md index c7cf1c1344..be680292af 100644 --- a/commands/bitpos.md +++ b/commands/bitpos.md @@ -38,7 +38,7 @@ SET mykey "\xff\xf0\x00" BITPOS mykey 0 SET mykey "\x00\xff\xf0" BITPOS mykey 1 0 -BITPOS mykey 1 1 +BITPOS mykey 1 2 set mykey "\x00\x00\x00" BITPOS mykey 1 ``` From 7c60b5aa35fbd422098873424b7e671163ab23dc Mon Sep 17 00:00:00 2001 From: Po-Ying Chen Date: Mon, 25 Aug 2014 14:59:47 +0800 Subject: [PATCH 0042/2314] Update tool.json to include redis-mount --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index f8b5e88d00..97810e602d 100644 --- a/tools.json +++ b/tools.json @@ -304,5 +304,13 @@ "repository": "https://pypi.python.org/pypi/nydus", "description": "Connection clustering and routing for Redis and Python.", "authors": ["zeeg"] + }, + { + "name": "redis-mount", + "language": "Go", + "url": "https://github.com/poying/redis-mount", + "repository": "https://github.com/poying/redis-mount", + "description": "redis-mount lets you use Redis as a filesystem.", + "authors": ["poying"] } ] From 1bb527638934b81da8f696b7f569c6a4b7204ba8 Mon Sep 17 00:00:00 2001 From: Carlos Abalde Date: Sat, 30 Aug 2014 11:35:01 +0200 Subject: [PATCH 0043/2314] Added libvmod-redis client (VCL) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 48243f11f7..347a02a6db 100644 --- a/clients.json +++ b/clients.json @@ -806,5 +806,14 @@ "description": "Redis client for Nimrod", "authors": [], "active": true + }, + + { + "name": "libvmod-redis", + "language": "VCL", + "repository": "https://github.com/carlosabalde/libvmod-redis", + "description": "Varnish Cache module using the synchronous hiredis library API to access Redis servers from VCL.", + "authors": ["carlosabalde"], + "active": true } ] From 
4515a7d12af0a2ad1248ec2d675cebe7507920c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vin=C3=ADcius=20do=20Carmo?= Date: Tue, 2 Sep 2014 15:20:03 -0300 Subject: [PATCH 0044/2314] Fixes #415 --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index a5e66788f9..847e467888 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -141,7 +141,7 @@ the `MSET` and `MGET` commands: 2) "20" 3) "30" -When `MSET` is used, Redis returns an array of values. +When `MGET` is used, Redis returns an array of values. Altering and querying the key space --- From b7f082dd1b392334dfb793cd1247bd0b78ac1f5c Mon Sep 17 00:00:00 2001 From: "David Humphrey (:humph) david.humphrey@senecacollege.ca" Date: Fri, 5 Sep 2014 15:26:37 -0400 Subject: [PATCH 0045/2314] Fixes to topics/data-types-intro.md for typos, grammar and readability. --- topics/data-types-intro.md | 287 ++++++++++++++++++------------------- 1 file changed, 143 insertions(+), 144 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index a5e66788f9..82bc5066f9 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -12,13 +12,13 @@ by Redis, which will be covered separately in this tutorial: * Sets: collections of unique, unsorted string elements. * Sorted sets, similar to Sets but where every string element is associated to a floating number value, called *score*. The elements are always taken sorted - by their score, so unlike Sets it is possible to retrieve range of elements - (for example you may aks: give me the top 10, or the bottom 10). + by their score, so unlike Sets it is possible to retrieve a range of elements + (for example you may ask: give me the top 10, or the bottom 10). * Hashes, which are maps composed of fields associated with values. Both the - field and the value are strings. 
This are very similary to Ruby or Python + field and the value are strings. This is very similar to Ruby or Python hashes. -* Bit arrays (or simply bitmaps): it is possible, usign special commands, to - handle String values like array of bits: you can set and clear individual +* Bit arrays (or simply bitmaps): it is possible, using special commands, to + handle String values like an array of bits: you can set and clear individual bits, count all the bits set to 1, find the first set or unset bit, and so forth. * HyperLogLogs: this is a probabilistic data structure which is used in order @@ -27,7 +27,7 @@ by Redis, which will be covered separately in this tutorial: It's not always trivial to grasp how these data types work and what to use in order to solve a given problem from the [command reference](/commands), so this -document is a crash course to Redis data types and their most used patterns. +document is a crash course to Redis data types and their most common patterns. For all the examples we'll use the `redis-cli` utility, that's a simple but handy command line utility to issue commands against the Redis server. @@ -41,28 +41,27 @@ The empty string is also a valid key. A few other rules about keys: -* Too long keys are not a good idea, for instance a key of 1024 bytes is not a - good idea not only memory-wise, but also because the lookup of the key in the +* Very long keys are not a good idea, for instance a key of 1024 bytes is a bad + idea not only memory-wise, but also because the lookup of the key in the dataset may require several costly key-comparisons. Even when the task at hand - is to match the existence of a big value, to resort to hashing it (for example + is to match the existence of a large value, to resort to hashing it (for example with SHA1) is a better idea, especially from the point of view of memory and bandwidth. -* Too short keys are often not a good idea. 
There is little point in writing - "u1000flw" as key if you can write instead "user:1000:followers", the latter - is more readable and the added space is little compared to the space used by - the key object itself and the value object. However it is not possible to deny - that short keys will consume a bit less memory. Your job is to find the - right balance. -* Try to stick with a schema. For instance "object-type:id" can be a nice - idea, like in "user:1000". Dots or dashes are often used for multi-words - fields, like in "comment:1234:reply.to" or "comment:1234:reply-to". +* Very short keys are often not a good idea. There is little point in writing + "u1000flw" as a key if you can instead write "user:1000:followers". The latter + is more readable and the added space is minor compared to the space used by + the key object itself and the value object. While short keys will obviously + consume a bit less memory, your job is to find the right balance. +* Try to stick with a schema. For instance "object-type:id" is a good + idea, as in "user:1000". Dots or dashes are often used for multi-word + fields, as in "comment:1234:reply.to" or "comment:1234:reply-to". * The maximum allowed key size is 512 MB. Redis Strings --- -The Redis String type is the simplest type of value you can associate to +The Redis String type is the simplest type of value you can associate with a Redis key. It is the only data type in Memcached, so it is also very natural for newcomers to use it in Redis. @@ -80,15 +79,15 @@ will be performed via `redis-cli` in this tutorial). As you can see using the `SET` and the `GET` commands are the way we set and retrieve a string value. Note that `SET` will replace any existing value -stored already into the key, in case the key already exists, even if the key -is associated with a non-string value. So `SET` performs an assignment. 
+already stored into the key, in the case that the key already exists, even if +the key is associated with a non-string value. So `SET` performs an assignment. Values can be strings (including binary data) of every kind, for instance you can store a jpeg image inside a key. A value can't be bigger than 512 MB. The `SET` command has interesting options, that are provided as additional -arguments. For example if I may ask `SET` to fail if the key already exists, -or the exact contrary, that is, to only succeed if the key already exists: +arguments. For example, I may ask `SET` to fail if the key already exists, +or the opposite, that it only succeed if the key already exists: > set mykey newval nx (nil) @@ -115,23 +114,23 @@ always the same command, acting in a slightly different way. What does it mean that INCR is atomic? That even multiple clients issuing INCR against -the same key will never incur into a race condition. For instance it can never -happen that client 1 read "10", client 2 read "10" at the same time, both -increment to 11, and set the new value of 11. The final value will always be +the same key will never enter into a race condition. For instance, it will never +happen that client 1 reads "10", client 2 reads "10" at the same time, both +increment to 11, and set the new value to 11. The final value will always be 12 and the read-increment-set operation is performed while all the other clients are not executing a command at the same time. -There are a number of commands operating on strings. For example -the `GETSET` command sets a key to a new value, returning the old value as +There are a number of commands for operating on strings. For example +the `GETSET` command sets a key to a new value, returning the old value as the result. You can use this command, for example, if you have a system that increments a Redis key using `INCR` -every time your web site receives a new visit. 
You want to collect this -information one time every hour, without losing a single increment. +every time your web site receives a new visitor. You may want to collect this +information once every hour, without losing a single increment. You can `GETSET` the key, assigning it the new value of "0" and reading the old value back. The ability to set or retrieve the value of multiple keys in a single -command is also useful for reduce latency. For this reason there are +command is also useful for reduced latency. For this reason there are the `MSET` and `MGET` commands: > mset a 10 b 20 c 30 @@ -163,12 +162,12 @@ and associated value, whatever the value is. > exists mykey (integer) 0 -From the examples you can also see how `DEL` itself returns 1 or 0 whatever +From the examples you can also see how `DEL` itself returns 1 or 0 depending on whether the key was removed (it existed) or not (there was no such key with that name). There are many key space related commands, but the above two are the -essential ones together with the `TYPE` command, that returns the kind +essential ones together with the `TYPE` command, which returns the kind of value stored at the specified key: > set mykey x @@ -183,11 +182,11 @@ of value stored at the specified key: Redis expires: keys with limited time to live --- -Before to continue with more complex data structures, we need to expose +Before continuing with more complex data structures, we need to discuss another feature which works regardless of the value type, and is -called **Redis expires**. Basically you can set a timeout to a key, which -is, a limited time to live. When the time to live elapsed, the key is -automatically destroyed, exactly like if the user called the `DEL` command +called **Redis expires**. Basically you can set a timeout for a key, which +is a limited time to live. When the time to live elapses, the key is +automatically destroyed, exactly as if the user called the `DEL` command with the key. 
A few quick info about Redis expires: @@ -234,7 +233,7 @@ Redis Lists To explain the List data type it's better to start with a little bit of theory, as the term *List* is often used in an improper way by information technology folks. For instance "Python Lists" are not what the name may suggest (Linked -Lists), they are actually Arrays (the same data type is called Array in +Lists), but rather Arrays (the same data type is called Array in Ruby actually). From a very general point of view a List is just a sequence of ordered @@ -244,10 +243,10 @@ an Array are very different from the properties of a List implemented using a Redis lists are implemented via Linked Lists. This means that even if you have millions of elements inside a list, the operation of adding a new element in -the head or in the tail of the list is performed *in constant time*. Adding a -new element with the `LPUSH` command to the head of a ten -elements list is the same speed as adding an element to the head of a 10 -million elements list. +the head or in the tail of the list is performed *in constant time*. The speed of adding a +new element with the `LPUSH` command to the head of a list with ten +elements is the same as adding an element to the head of list with 10 +million elements. What's the downside? Accessing an element *by index* is very fast in lists implemented with an Array (constant time indexed access) and not so fast in @@ -256,18 +255,19 @@ work proportional to the index of the accessed element). Redis Lists are implemented with linked lists because for a database system it is crucial to be able to add elements to a very long list in a very fast way. -Another strong advantage is, as you'll see in a moment, that Redis Lists can be +Another strong advantage, as you'll see in a moment, is that Redis Lists can be taken at constant length in constant time. 
When fast access to the middle of a large collection of elements is important, -there is a different data structure that can be used, called sorted sets. Sorted sets will be covered later in this tutorial. +there is a different data structure that can be used, called sorted sets. +Sorted sets will be covered later in this tutorial. First steps with Redis Lists --- The `LPUSH` command adds a new element into a list, on the left (at the head), while the `RPUSH` command adds a new -element into a list, on the right (at the tail). Finally the +element into a list, on the right (at the tail). Finally the `LRANGE` command extracts ranges of elements from lists: > rpush mylist A (integer) 1 > rpush mylist B (integer) 2 > lpush mylist first (integer) 3 > lrange mylist 0 -1 1) "first" 2) "A" 3) "B" Note that [LRANGE](/commands/lrange) takes two indexes, the first and the last -element of the range to return. Both the indexes can be negative to tell Redis -to start to count from the end, so -1 is the last element, -2 is the +element of the range to return. Both the indexes can be negative, telling Redis +to start counting from the end: so -1 is the last element, -2 is the penultimate element of the list, and so forth. As you can see `RPUSH` appended the elements on the right of the list, while @@ -306,7 +306,7 @@ multiple elements into a list in a single call: 9) "foo bar" An important operation defined on Redis lists is the ability to *pop elements*. -Popping elements is the operation of retrieving the element from the list, +Popping elements is the operation of both retrieving the element from the list, and eliminating it from the list, at the same time. You can pop elements from left and right, similarly to how you can push elements in both sides of the list: @@ -337,17 +337,19 @@ Lists are useful for a number of tasks, two very representative use cases are the following: * Remember the latest updates posted by users into a social network. 
-* Communication between processes, using a consumer-producer pattern where the producer pushes items into a list, and a consumer (usually a *worker*) consumes those items executing actions. Redis has special list commands to make this use case both more reliable and efficient. +* Communication between processes, using a consumer-producer pattern where the producer pushes items into a list, and a consumer (usually a *worker*) consumes those items and executes actions. Redis has special list commands to make this use case both more reliable and efficient. -For example both the popular Ruby libraries [resque](https://github.com/resque/resque) and [sidekiq](https://github.com/mperham/sidekiq) use Redis lists under the hoods in order to implement background jobs. +For example both the popular Ruby libraries [resque](https://github.com/resque/resque) and +[sidekiq](https://github.com/mperham/sidekiq) use Redis lists under the hoods in order to +implement background jobs. -The popular Twitter social network [takes the latest tweets](http://www.infoq.com/presentations/Real-Time-Delivery-Twitter) posted by users into Redis lists. +The popular Twitter social network [takes the latest tweets](http://www.infoq.com/presentations/Real-Time-Delivery-Twitter) +posted by users into Redis lists. -To describe a common use case step by step, imagine you want to speedup the -list of the latest published photos in the home page of your photo sharing -social network. +To describe a common use case step by step, imagine your home page shows the latest +photos published in a photo sharing social network and you want to speed up access. -* Every time an user posts a new photo, we add its ID into a list with `LPUSH`. +* Every time a user posts a new photo, we add its ID into a list with `LPUSH`. * When users visit the home page, we use `LRANGE 0 9` in order to get the latest 10 posted items. 
Capped lists @@ -356,7 +358,7 @@ Capped lists In many use cases we just want to use lists to store the *latest items*, whatever they are: social network updates, logs, or anything else. -Redis allows to use lists as a capped collection, only remembering the latest +Redis allows us to use lists as a capped collection, only remembering the latest N items and discarding all the oldest items using the `LTRIM` command. The `LTRIM` command is similar to `LRANGE`, but **instead of displaying the @@ -375,20 +377,19 @@ An example will make it more clear: 3) "3" The above `LTRIM` command tells Redis to take just list elements from index -0 to 2, everything else will be discarded. This makes you able to mount -a very simple but useful patter, consisting in doing a List push operation -+ a List trim operation together in order to add a new element and discard -exceeding elements: +0 to 2, everything else will be discarded. This allows for a very simple but +useful pattern: doing a List push operation + a List trim operation together +in order to add a new element and discard elements exceeding a limit: LPUSH mylist LTRIM mylist 0 999 -The above combination of elements add a new element taking only the 1000 +The above combination adds a new element and takes only the 1000 newest elements into the list. With `LRANGE` you can access the top items without any need to remember very old data. Note: while `LRANGE` is technically an O(N) command, accessing small ranges -towards the head or the tail of the list, is a constant time operation. +towards the head or the tail of the list is a constant time operation. Blocking operations on lists --- @@ -398,27 +399,27 @@ and in general as a building block for inter process communication systems: blocking operations. Imagine you want to push items into a list with one process, and use -a different process in order to actually make some kind of work with those -items. 
This is the usual producer / consumer setup, that can be implemented +a different process in order to actually do some kind of work with those +items. This is the usual producer / consumer setup, and can be implemented in the following simple way: * To push items into the list, producers call `LPUSH`. * To extract / process items from the list, consumers call `RPOP`. However it is possible that sometimes the list is empty and there is nothing -to process, so `RPOP` just returns NULL. So a consumer is forced to wait +to process, so `RPOP` just returns NULL. In this case a consumer is forced to wait some time and retry again with `RPOP`. This is called *polling*, and is not a good idea in this context because it has several drawbacks: 1. Forces Redis and clients to process useless commands (all the requests when the list is empty will get no actual work done, they'll just return NULL). -2. Adds a delay to the processing of items, since after a worker receives a NULL, it waits some time. To make the delay smaller, we could wait less between calls to `RPOP`, with the effect of aggravating more problem number 1: more useless calls to Redis. +2. Adds a delay to the processing of items, since after a worker receives a NULL, it waits some time. To make the delay smaller, we could wait less between calls to `RPOP`, with the effect of amplifying problem number 1, i.e. more useless calls to Redis. So Redis implements commands called `BRPOP` and `BLPOP` which are versions of `RPOP` and `LPOP` able to block if the list is empty: they'll return to -the caller only when a new element is added to the list, or when an user -specified timeout is reached. +the caller only when a new element is added to the list, or when a user-specified +timeout is reached. 
-This is an example of `BRPOP` call we could use in the worker: +This is an example of a `BRPOP` call we could use in the worker: > brpop tasks 5 1) "tasks" @@ -427,19 +428,19 @@ This is an example of `BRPOP` call we could use in the worker: It means: "wait for elements in the list `tasks`, but return if after 5 seconds no element is available". -Note that you can use 0 as timeout to wait forever for elements, and you can -also specify multiple lists and not just one, in order to wait into multiple +Note that you can use 0 as timeout to wait for elements forever, and you can +also specify multiple lists and not just one, in order to wait on multiple lists at the same time, and get notified when the first list receives an element. -A few things to note about `BRPOP`. +A few things to note about `BRPOP`: -1. Clients are served in an ordered way: the first client that blocked waiting for a list, is served the first when an element is pushed by some other client, and so forth. -2. The return value is different compared to `RPOP`: it is a two elements array since it also includes the name of the key, because `BRPOP` and `BLPOP` are able to block waiting for elements from multiple lists. +1. Clients are served in an ordered way: the first client that blocked waiting for a list, is served first when an element is pushed by some other client, and so forth. +2. The return value is different compared to `RPOP`: it is a two-element array since it also includes the name of the key, because `BRPOP` and `BLPOP` are able to block waiting for elements from multiple lists. 3. If the timeout is reached, NULL is returned. There are more things you should know about lists and blocking ops. We -suggest you to read the following pages: +suggest that you read more on the following: * It is possible to build safer queues or rotating queues using `RPOPLPUSH`. * There is also a blocking variant of the command, called `BRPOPLPUSH`. 
@@ -449,17 +450,16 @@ Automatically creation and removal of keys So far in our examples we never had to create empty lists before pushing elements, or removing empty lists when they no longer have elements inside. -It is Redis care to delete keys when lists are left empty, or to create +It is Redis' responsibility to delete keys when lists are left empty, or to create an empty list if the key does not exist and we are trying to add elements to it, for example, with `LPUSH`. This is not specific to lists, it applies to all the Redis data types -composed of multiple elements, so also applies to Sets, Sorted Sets and -Hashes. +composed of multiple elements--Sets, Sorted Sets and Hashes. Basically we can summarize the behavior with three rules: -1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is crated before adding the element. +1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is created before adding the element. 2. When we remove elements from an aggregate data type, if the value remains empty, the key is automatically destroyed. 3. Calling a read-only command such as `LLEN` (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. @@ -494,7 +494,7 @@ Example of rule 2: > exists mylist (integer) 0 -The key does no longer exist after all the elements are popped. +The key no longer exists after all the elements are popped. 
Example of rule 3: @@ -509,7 +509,7 @@ Example of rule 3: Redis Hashes --- -Redis hashes look like exactly how you expect an "hash" to look like: +Redis hashes look exactly how one might expect a "hash" to look, with field-value pairs: > hmset user:1000 username antirez birthyear 1977 verified 1 OK @@ -525,10 +525,9 @@ Redis hashes look like exactly how you expect an "hash" to look like: 5) "verified" 6) "1" -It is just a set of fields-values pairs. While hashes are handy to represent -*objects*, actually the number of fields you can put inside an hash has -no practical limits if not available memory, so you can use hashes in many -different ways inside your application. +While hashes are handy to represent *objects*, actually the number of fields you can +put inside a hash has no practical limits (other than available memory), so you can use +hashes in many different ways inside your application. The command `HMSET` sets multiple fields of the hash, while `HGET` retrieves a single field. `HMGET` is similar to `HGET` but returns an array of values: @@ -548,7 +547,7 @@ as well, like `HINCRBY`: You can find the [full list of hash commands in the documentation](http://redis.io/commands#hash). -It is worth to note that small hashes (a few elements, not too big values) are +It is worth noting that small hashes (i.e., a few elements with small values) are encoded in special way in memory that make them very memory efficient. @@ -559,7 +558,7 @@ Redis Sets are unordered collections of strings. The `SADD` command adds new elements to a set. It's also possible to do a number of other operations against sets like testing if a given element already exists, performing the intersection, union or difference between -multiple sets and so forth. +multiple sets, and so forth. > sadd myset 1 2 3 (integer) 3 @@ -568,12 +567,12 @@ multiple sets and so forth. 2. 1 3. 2 -I added three elements to my set and told Redis to return back all the -elements. 
As you can see they are not sorted, Redis is free to return the +Here I've added three elements to my set and told Redis to return all the +elements. As you can see they are not sorted--Redis is free to return the elements in any order at every call, since there is no contract with the -user about elements ordering. +user about element ordering. -We have commands to test for membership. Does a given element exists? +Redis has commands to test for membership. Does a given element exist? > sismember myset 3 (integer) 1 @@ -589,8 +588,8 @@ A simple way to model this problem is to have a set for every object we want to tag. The set contains the IDs of the tags associated with the object. Imagine we want to tag news. -If our news ID 1000 is tagged with tag 1,2,5 and 77, we can have one set -associating our tag IDs with the news: +If our news ID 1000 is tagged with tags 1, 2, 5 and 77, we can have one set +associating our tag IDs with the news item: > sadd news:1000:tags 1 2 5 77 (integer) 4 @@ -621,8 +620,8 @@ a Redis hash, which maps tag IDs to tag names. There are other non trivial operations that are still easy to implement using the right Redis commands. For instance we may want a list of all the objects with the tags 1, 2, 10, and 27 together. We can do this using -the `SINTER` command, that performs the intersection between different -sets. We can just use: +the `SINTER` command, which performs the intersection between different +sets. We can use: > sinter tag:1:news tag:2:news tag:10:news tag:27:news ... results here ... @@ -632,8 +631,8 @@ unions, difference, extract a random element, and so forth. The command to extract an element is called `SPOP`, and is handy to model certain problems. For example in order to implement a web-based poker game, -you may want to represent your deck into a set. Imagine we use a one-char -prefix for (C)lubs, (D)iamonds, (H)earts, (S)pades. +you may want to represent your deck with a set. 
Imagine we use a one-char +prefix for (C)lubs, (D)iamonds, (H)earts, (S)pades: > sadd deck C1 C2 C3 C4 C5 C6 C7 C8 C9 C10 CJ CQ CK D1 D2 D3 D4 D5 D6 D7 D8 D9 D10 DJ DQ DK H1 H2 H3 @@ -647,18 +646,18 @@ perfect operation in this case. However if we call it against our deck directly, in the next play of the game we'll need to populate the deck of cards again, which may not be -ideal. So to start, we can make a copy of the set stored in the `deck` key, +ideal. So to start, we can make a copy of the set stored in the `deck` key into the `game:1:deck` key. This is accomplished using `SUNIONSTORE`, which normally performs the union between multiple sets, and stores the result into another set. -However the intersection of a single set, is itself, so I can copy my deck +However, since the union of a single set is itself, I can copy my deck with: > sunionstore game:1:deck deck (integer) 52 -Now I'm ready to provide the first player with its five cards: +Now I'm ready to provide the first player with five cards: > spop game:1:deck "C6" @@ -673,8 +672,8 @@ Now I'm ready to provide the first player with five cards: One pair of jacks, not great... -It is a good time to introduce the set command that provides the number -of elements inside a set. This is often called *cardinality of a set* +Now it's a good time to introduce the set command that provides the number +of elements inside a set. This is often called the *cardinality of a set* in the context of set theory, so the Redis command is called `SCARD`. > scard game:1:deck @@ -690,13 +689,13 @@ the ability to return both repeating and non-repeating elements. Redis Sorted sets --- -Sorted sets are a data type which is similar to a mix between asSet and -an hash. Like sets, sorted sets are composed of unique, non-repeating +Sorted sets are a data type which is similar to a mix between a Set and +a Hash. 
Like sets, sorted sets are composed of unique, non-repeating string elements, so in some sense a sorted set is a set as well. However while elements inside sets are not ordered, every element in a sorted set is associated with a floating point value, called *the score* -(this is why the type is also similar to an hash, since every element +(this is why the type is also similar to a hash, since every element is mapped to a value). Moreover, elements in a sorted sets are *taken in order* (so they are not @@ -729,8 +728,8 @@ sorted set elements, with their year of birth as "score". (integer) 1 -As you can see `ZADD` is similar to `SADD`, but takes one argument more -(placed before the element to add itself), which is the score. +As you can see `ZADD` is similar to `SADD`, but takes one additional argument +(placed before the element to be added) which is the score. `ZADD` is also variadic, so you are free to specify multiple score-value pairs, even if this is not used in the example above. @@ -755,7 +754,7 @@ all, it's already all sorted: 9) "Linus Torvalds" Note: 0 and -1 means from element index 0 to the last element (-1 works -like in the case of the `LRANGE` command). +here just as it does in the case of the `LRANGE` command). What if I want to order them the opposite way, youngest to oldest? Use [ZREVRANGE](/commands/zrevrange) instead of [ZRANGE](/commands/zrange): @@ -797,7 +796,7 @@ Operating on ranges --- Sorted sets are more powerful than this. They can operate on ranges. -Let's get all the individuals that were born up to the 1950 inclusive. We +Let's get all the individuals that were born up to 1950 inclusive. We use the `ZRANGEBYSCORE` command to do it: > zrangebyscore hackers -inf 1950 @@ -820,28 +819,28 @@ the hackers born between 1940 and 1960 from the sorted set: but it can be very useful, and returns the number of removed elements. Another extremely useful operation defined for sorted set elements -is the get-rank operation. 
It is basically possible to ask what is the -position of an element in the set of the order elements. +is the get-rank operation. It is possible to ask what is the +position of an element in the set of the ordered elements. > zrank hackers "Anita Borg" (integer) 4 -The `ZREVRANK` command is also available in order to get the rank considering +The `ZREVRANK` command is also available in order to get the rank, considering the elements sorted a descending way. Lexicographical scores --- -With recent versions of Redis 2.8, a new feature was introduced that allows, -assuming elements in a sorted set are all inserted with the same identical -score, to get ranges lexicographically (elements are compared with the C +With recent versions of Redis 2.8, a new feature was introduced that allows +getting ranges lexicographically, assuming elements in a sorted set are all +inserted with the same identical score (elements are compared with the C `memcmp` function, so it is guaranteed that there is no collation, and every Redis instance will reply with the same output). The main commands to operate with lexicographical ranges are `ZRANGEBYLEX`, `ZREVRANGEBYLEX`, `ZREMRANGEBYLEX` and `ZLEXCOUNT`. -For example, let's add again our list of famous hackers. But this time, +For example, let's add again our list of famous hackers, but this time use a score of zero for all the elements: > zadd hackers 0 "Alan Kay" 0 "Sophie Wilson" 0 "Richard Stallman" 0 @@ -873,14 +872,14 @@ Ranges can be inclusive or exclusive (depending on the first character), also string infinite and minus infinite are specified respectively with the `+` and `-` strings. See the documentation for more information. -This feature is important because allows to use sorted sets as a generic +This feature is important because it allows us to use sorted sets as a generic index. 
For example, if you want to index elements by a 128-bit unsigned integer argument, all you need to do is to add elements into a sorted -set with the same score (for example 0) but with an 8 bytes prefix +set with the same score (for example 0) but with a 16 byte prefix consisting of **the 128 bit number in big endian**. Since numbers in big endian, when ordered lexicographically (in raw bytes order) are actually ordered numerically as well, you can ask for ranges in the 128 bit space, -and get the elements value discarding the prefix. +and get the element's value discarding the prefix. If you want to see the feature in the context of a more serious demo, check the [Redis autocomplete demo](http://autocomplete.redis.io). @@ -889,16 +888,16 @@ Updating the score: leader boards --- Just a final note about sorted sets before switching to the next topic. -Sorted sets scores can be updated at any time. Just calling again `ZADD` against +Sorted sets' scores can be updated at any time. Just calling `ZADD` against an element already included in the sorted set will update its score -(and position) with O(log(N)) time complexity, so sorted sets are suitable +(and position) with O(log(N)) time complexity. As such, sorted sets are suitable when there are tons of updates. Because of this characteristic a common use case is leader boards. The typical application is a Facebook game where you combine the ability to take users sorted by their high score, plus the get-rank operation, in order -to show the top-N users, and the user rank in the leader board (you are -the #4932 best score here). +to show the top-N users, and the user rank in the leader board (e.g., "you are +the #4932 best score here"). Bitmaps @@ -911,14 +910,14 @@ bits. 
Bit operations are divided into two groups: constant-time single bit operations, like setting a bit to 1 or 0, or getting its value, and -operations in group of bits, for example counting the number of set -bits in a given range of bits (population counting). +operations on groups of bits, for example counting the number of set +bits in a given range of bits (e.g., population counting). -One of the biggest advantages of bitmaps is that they are sometimes an -extremely space saving way to store informations. For example in a system +One of the biggest advantages of bitmaps is that they often provide +extreme space savings when storing information. For example in a system where different users are represented by incremental user IDs, it is possible -to remember a single bit information (for example if they want to receive -or no the newsletter) of 4 million of users using just 512 MB of memory. +to remember a single bit information (for example, knowing whether +a user wants to receive a newsletter) of 4 million of users using just 512 MB of memory. Bits are set and retrieved using the `SETBIT` and `GETBIT` commands: @@ -929,7 +928,7 @@ Bits are set and retrieved using the `SETBIT` and `GETBIT` commands: > getbit key 11 (integer) 0 -The `SETBIT` command takes as first argument the bit number, and as second +The `SETBIT` command takes as its first argument the bit number, and as its second argument the value to set the bit to, which is 1 or 0. The command automatically enlarges the string if the addressed bit is outside the current string length. @@ -983,27 +982,27 @@ the Nth bit to address inside the key with `bit-number MOD M`. HyperLogLogs --- -An HyperLogLog is a probabilistic data structure used in order to count +A HyperLogLog is a probabilistic data structure used in order to count unique things (technically this is referred to estimating the cardinality -of a set). Usually counting unique items require to use an amount of memory +of a set). 
Usually counting unique items requires using an amount of memory proportional to the number of items you want to count, because you need -to remember the elements you already seen in the past, in order to avoid -to count them multiple times. However there is a set of algorithms that trade -memory for precision: you end with an estimated measure, with a standard error, -in the case of the Redis implementation, which is less than 1%, but the -magic of this algorithms is that you no longer need to use an amount of memory -proportional to the number of things counted, you just need to use a -constant amount of memory! 12k bytes in the worst case, or a lot less if you +to remember the elements you have already seen in the past in order to avoid +counting them multiple times. However there is a set of algorithms that trade +memory for precision: you end with an estimated measure with a standard error, +in the case of the Redis implementation, which is less than 1%. The +magic of this algorithm is that you no longer need to use an amount of memory +proportional to the number of items counted, and instead can use a +constant amount of memory! 12k bytes in the worst case, or a lot less if your HyperLogLog (We'll just call them HLL from now) has seen very few elements. HLLs in Redis, while technically a different data structure, is encoded -as a Redis string, so you can call `GET` to serialize an HLL, and `SET` -to un-serialize it back to the server. +as a Redis string, so you can call `GET` to serialize a HLL, and `SET` +to deserialize it back to the server. Conceptually the HLL API is like using Sets to do the same task. You would `SADD` every observed element into a set, and would use `SCARD` to check the number of elements inside the set, which are unique since `SCARD` will not -re-add an already added element. +re-add an existing element. 
While you don't really *add items* into an HLL, because the data structure only contains a state that does not include actual elements, the API is the @@ -1029,7 +1028,7 @@ Other notable features There are other important things in the Redis API that can't be explored in the context of this document, but are worth your attention: -* It is possible to [iterate the key space or a large collection incrementally](/commands/scan). +* It is possible to [iterate the key space of a large collection incrementally](/commands/scan). * It is possible to run [Lua scripts server side](/commands/eval) to win latency and bandwidth. * Redis is also a [Pub-Sub server](/topics/pubsub). @@ -1039,4 +1038,4 @@ Learn more This tutorial is in no way complete and has covered just the basics of the API. Read the [command reference](/commands) to discover a lot more. -Thanks for reading, and have a good hacking with Redis! +Thanks for reading, and have fun hacking with Redis! From 2fff2ae9a4c83b34989be5575618253dd51e96f1 Mon Sep 17 00:00:00 2001 From: "David Humphrey (:humph) david.humphrey@senecacollege.ca" Date: Fri, 5 Sep 2014 18:19:18 -0400 Subject: [PATCH 0046/2314] Review fixes --- topics/data-types-intro.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 82bc5066f9..5c02b403d5 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -340,7 +340,7 @@ are the following: * Communication between processes, using a consumer-producer pattern where the producer pushes items into a list, and a consumer (usually a *worker*) consumes those items and executed actions. Redis has special list commands to make this use case both more reliable and efficient. 
For example both the popular Ruby libraries [resque](https://github.com/resque/resque) and -[sidekiq](https://github.com/mperham/sidekiq) use Redis lists under the hoods in order to +[sidekiq](https://github.com/mperham/sidekiq) use Redis lists under the hood in order to implement background jobs. The popular Twitter social network [takes the latest tweets](http://www.infoq.com/presentations/Real-Time-Delivery-Twitter) @@ -455,7 +455,7 @@ an empty list if the key does not exist and we are trying to add elements to it, for example, with `LPUSH`. This is not specific to lists, it applies to all the Redis data types -composed of multiple elements--Sets, Sorted Sets and Hashes. +composed of multiple elements -- Sets, Sorted Sets and Hashes. Basically we can summarize the behavior with three rules: @@ -568,7 +568,7 @@ multiple sets, and so forth. 3. 2 Here I've added three elements to my set and told Redis to return all the -elements. As you can see they are not sorted--Redis is free to return the +elements. As you can see they are not sorted -- Redis is free to return the elements in any order at every call, since there is no contract with the user about element ordering. From 8bb64e5432b64d98900fe35077dd91b36c59dca1 Mon Sep 17 00:00:00 2001 From: ch1c0t Date: Sun, 7 Sep 2014 22:09:29 +0300 Subject: [PATCH 0047/2314] Fix a typo in bgsave.md --- commands/bgsave.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/bgsave.md b/commands/bgsave.md index 04e62fa0ef..71101b12bd 100644 --- a/commands/bgsave.md +++ b/commands/bgsave.md @@ -2,7 +2,7 @@ Save the DB in background. The OK code is immediately returned. Redis forks, the parent continues to serve the clients, the child saves the DB on disk then exits. -A client my be able to check if the operation succeeded using the `LASTSAVE` +A client may be able to check if the operation succeeded using the `LASTSAVE` command. 
Please refer to the [persistence documentation][tp] for detailed information. From f3cb17533c58c098be35e7e53623cd26678b54bb Mon Sep 17 00:00:00 2001 From: Philipp Klose Date: Thu, 11 Sep 2014 12:52:28 +0200 Subject: [PATCH 0048/2314] Update redis.conf links Update redis.conf links: * Fix links for older versions * Add link for redis 2.8 --- topics/config.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/topics/config.md b/topics/config.md index 525e153f02..0ec8109892 100644 --- a/topics/config.md +++ b/topics/config.md @@ -26,8 +26,9 @@ The list of configuration directives, and their meaning and intended usage is available in the self documented example redis.conf shipped into the Redis distribution. -* The self documented [redis.conf for Redis 2.6](https://raw.github.com/antirez/redis/2.6/redis.conf). -* The self documented [redis.conf for Redis 2.4](https://raw.github.com/antirez/redis/2.4/redis.conf). +* The self documented [redis.conf for Redis 2.8](https://raw.githubusercontent.com/antirez/redis/2.8/redis.conf) +* The self documented [redis.conf for Redis 2.6](https://raw.githubusercontent.com/antirez/redis/2.6/redis.conf). +* The self documented [redis.conf for Redis 2.4](https://raw.githubusercontent.com/antirez/redis/2.4/redis.conf). Passing arguments via the command line --- From 86aa955af4daef02657567b156e0d6dc324340b7 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Sep 2014 11:17:29 +0200 Subject: [PATCH 0049/2314] Sentinel doc: info about adding/removing Sentinels and slaves. 
--- topics/sentinel.md | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/topics/sentinel.md b/topics/sentinel.md index cdda5d4b9c..976f9ae372 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -427,6 +427,58 @@ As already stated, `SENTINEL SET` can be used to set all the configuration param Note that there is no equivalent GET command since `SENTINEL MASTER` provides all the configuration parameters in a simple to parse format (as a field/value pairs array). +Adding or removing Sentinels +--- + +Adding a new Sentinel to your deployment is a simple process because of the +auto-discover mechanism implemented by Sentinel. All you need to do is to +start the new Sentinel configured to monitor the currently active master. +Within 10 seconds the Sentinel will acquire the list of other Sentinels and +the set of slaves attached to the master. + +If you need to add multiple Sentinels at once, it is suggested to add it +one after the other, waiting for all the other Sentinels to already know +about the first one before adding the next. This is useful in order to still +guarantee that majority can be achieved only in one side of a partition, +in the chance failures should happen in the process of adding new Sentinels. + +This can be easily achieved by adding every new Sentinel with a 30 seconds delay, +and during absence of network partitions. + +At the end of the process it is possible to use the command +`SENTINEL MASTER mastername` in order to check if all the Sentinels agree about +the total number of Sentinels monitoring the master. + +Removing a Sentinel is a bit more complex: Sentinels never forget already seen +Sentinels, even if they are not reachable for a long time, since we don't +want to dynamically change the majority needed to authorize a failover and +the creation of a new configuration number. 
So in order to remove a Sentinel +the following steps should be performed in absence of network partitions: + +1. Stop the Sentinel process of the Sentinel you want to remove. +2. Send a `SENTINEL RESET *` command to all the other Sentinel instances (instead of `*` you can use the exact master name if you want to reset just a single master). One after the other, waiting at least 30 seconds between instances. +3. Check that all the Sentinels agree about the number of Sentinels currently active, by inspecting the output of `SENTINEL MASTER mastername` of every Sentinel. + +Removing the old master or unreachable slaves. +--- + +Sentinels never forget about slaves of a given master, even when they are +unreachable for a long time. This is useful, because Sentinels should be able +to correctly reconfigure a returning slave after a network partition or a +failure event. + +Moreover, after a failover, the failed over master is virtually added as a +slave of the new master, this way it will be reconfigured to replicate with +the new master as soon as it will be available again. + +However sometimes you want to remove a slave (that may be the old master) +forever from the list of slaves monitored by Sentinels. + +In order to do this, you need to send a `SENTINEL RESET mastername` command +to all the Sentinels: they'll refresh the list of slaves within the next +10 seconds, only adding the ones listed as correctly replicating from the +current master `INFO` output. + Pub/Sub Messages --- From dede7947d8e6800a46910bed269b0eb26a5d9d20 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 Sep 2014 14:53:34 +0200 Subject: [PATCH 0050/2314] Make sure users understand there are two TCP ports to open. 
--- topics/cluster-tutorial.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 13d4c67171..c6443a367e 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -52,6 +52,13 @@ not able to communicate. The command port and cluster bus port offset is fixed and is always 10000. +Note that for a Redis Cluster to work properly you need, for each node: + +1. The normal client communication port (usually 6379) used to communicate with clients to be open to all the clients that need to reach the cluster, plus all the other cluster nodes (that use the client port for keys migrations). +2. The cluster bus port (the client port + 10000) must be reachable from all the other cluster nodes. + +If you don't open both TCP ports, your cluster will not work as expected. + Redis Cluster data sharding --- From 738391e55b7569904f21abbd2939e22606dbf5c3 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Sep 2014 15:42:46 +0200 Subject: [PATCH 0051/2314] Sentinel doc: info about Redis authentication. --- topics/sentinel.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/topics/sentinel.md b/topics/sentinel.md index 8822f59bff..af523b30ea 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -377,6 +377,34 @@ the failover more resistant to partitions: * Masters failed over are reconfigured as slaves when they return available. * Slaves partitioned away during a partition are reconfigured once reachable. +Sentinel and Redis authentication +--- + +When the master is configured to require a password from clients, +as a security measure, slaves need to also be aware of this password in +order to authenticate with the master and create the master-slave connection +used for the asynchronous replication protocol. 
+ +This is achieved using the following configuration directives: + +* `requirepass` in the master, in order to set the authentication password, and to make sure the instance will not process requests for non authenticated clients. +* `masterauth` in the slaves in order for the slaves to authenticate with the master in order to correctly replicate data from it. + +When Sentinel is used, there is not a single master, since after a failover +slaves may play the role of masters, and old masters can be reconfigured in +order to act as slaves, so what you want to do is to set the above directives +in all your instances, both masters and slaves. + +This is also usually a logically sane setup since you don't want to protect +data only in the master, having the same data accessible in the slaves. + +However, in the uncommon case where you need a slave that is accessible +without authentication, you can still do it by setting up a slave priority +of zero (that will not allow the salve to be promoted to master), and +configuring only the `masterauth` directive for this slave, without +the `requirepass` directive, so that data will be readable by unauthenticated +clients. + Sentinel API === From da8376e9d38c52eec253b32719d62d44769a1227 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Sep 2014 16:09:19 +0200 Subject: [PATCH 0052/2314] Sentinel slave selection documented. --- topics/sentinel.md | 47 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/topics/sentinel.md b/topics/sentinel.md index af523b30ea..82873ed9b9 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -377,6 +377,53 @@ the failover more resistant to partitions: * Masters failed over are reconfigured as slaves when they return available. * Slaves partitioned away during a partition are reconfigured once reachable. 
+Slave selection and priority
+---
+
+When a Sentinel instance is ready to perform a failover, since the master
+is in `ODOWN` state and the Sentinel received the authorization to failover
+from the majority of the Sentinel instances known, a suitable slave needs
+to be selected.
+
+The slave selection process evaluates the following information about slaves:
+
+1. Disconnection time from the master.
+2. Slave priority.
+3. Replication offset processed.
+4. Run ID.
+
+A slave that is found to be disconnected from the master for more than ten
+times the configured master timeout (down-after-milliseconds option), plus
+the time the master is also not available from the point of view of the
+Sentinel doing the failover, is considered to be not suitable for the failover
+and is skipped.
+
+In more rigorous terms, a slave whose `INFO` output suggests it has been
+disconnected from the master for more than:
+
+    (down-after-milliseconds * 10) + milliseconds_since_master_is_in_SDOWN_state
+
+Is considered to be not reliable and is discarded entirely.
+
+The slave selection only considers the slaves that passed the above test,
+and sorts them based on the above criteria, in the following order.
+
+1. The slaves are sorted by `slave-priority` as configured in the `redis.conf` file of the Redis instance. A lower priority will be preferred.
+2. If the priority is the same, the replication offset processed by the slave is checked, and the slave that received more data from the master is selected.
+3. If multiple slaves have the same priority and processed the same data from the master, a further check is performed, selecting the slave with the lexicographically smaller run ID. Having a lower run ID is not a real advantage for a slave, but is useful in order to make the process of slave selection more deterministic, instead of resorting to select a random slave.
+ +Redis masters (that may be turned into slaves after a failover), and slaves, all +must be configured with a `slave-priority` if there are machines to be strongly +preferred. Otherwise all the instances can run with the default run ID (which +is the suggested setup, since it is far more interesting to select the slave +by replication offset). + +A Redis instance can be configured with a special `slave-priority` of zero +in order to be **never selected** by Sentinels as the new master. +However a slave configured in this way will still be reconfigured by +Sentinels in order to replicate with the new master after a failover, the +only difference is that it will never become a master itself. + Sentinel and Redis authentication --- From 0f4386806a44c67a74d7ca161892cb5daef25ea7 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 26 Sep 2014 10:06:54 +0200 Subject: [PATCH 0053/2314] Information about migrating to Redis Cluster. --- topics/cluster-tutorial.md | 48 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index c6443a367e..31709db394 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -871,3 +871,51 @@ Upgrading masters is a bit more complex, and the suggested procedure is: Following this procedure you should upgrade one node after the other until all the nodes are upgraded. + +Migrating to Redis Cluster +--- + +Users willing to migrate to Redis Cluster may have just a single master, or +may already using a preexisting sharding setup, where keys +are split among N nodes, using some in-house algorithm or a sharding algorithm +implemented by their client library or Redis proxy. + +In both cases it is possible to migrate to Redis Cluster easily, however +what is the most important detail is if multiple-keys operations are used +by the application, and how. There are three different cases: + +1. 
Multiple keys operations, or transactions, or Lua scripts involving muliple keys, are not used. Keys are accessed independently (even if accessed via transactions or Lua scripts grouping multiple commands together). +2. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used by only with keys having the same **hash tag**, which means that the keys used together all have a `{...}` sub-string that happens to be identical. For example the following multiple keys operation is defined in the context of the same hash tag: `SUNION {user:1000}.foo {user:1000}.bar`. +3. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used with key names not having an explicit, or the same, hash tag. + +The third case is not handled by Redis Cluster: the application requires to +be modified in order to don't use multi keys operations or only use them in +the context of the same hash tag. + +Case 1 and 2 are covered, so we'll focus on those two cases, that are handled +in the same way, so no distinction will be made in the documentation. + +Assuming you have your preexisting data set split into N masters, where +N=1 if you have no preexisting sharding, the following steps are needed +in order to migrate your data set to Redis Cluster: + +1. Stop your clients. No automatic live-migration to Redis Cluster is currently possible. You may be able to do it orchestrating a live migration in the context of your application / enviroment. +2. Generate an append only file for all of your N masters using the BGREWRITEAOF command, and waiting for the AOF file to be completely generated. +3. Save your AOF files from aof-1 to aof-N somewhere. At this point you can stop your old instances if you wish (this is useful since in non-virtualized deployments you often need to reuse the same computers). +4. Create a Redis Cluster composed of N masters and zero slaves. You'll add slaves later. 
Make sure all your nodes are using the append only file for persistence.
+5. Stop all the cluster nodes, substitute their append only file with your pre-existing append only files, aof-1 for the first node, aof-2 for the second node, up to aof-N.
+6. Restart your Redis Cluster nodes with the new AOF files. They'll complain that there are keys that should not be there according to their configuration.
+7. Use `redis-trib fix` command in order to fix the cluster so that keys will be migrated according to the hash slots each node is authoritative or not.
+8. Use `redis-trib check` at the end to make sure your cluster is ok.
+9. Restart your clients modified to use a Redis Cluster aware client library.
+
+There is an alternative way to import data from external instances to a Redis
+Cluster, which is to use the `redis-trib import` command.
+
+The command moves all the keys of a running instance (deleting the keys from
+the source instance) to the specified pre-existing Redis Cluster. However
+note that if you use a Redis 2.8 instance as source instance the operation
+may be slow since 2.8 does not implement migrate connection caching, so you
+may want to restart your source instance with a Redis 3.x version before
+to perform such operation.
+
From 9b9cfcc960319ce0683120002cab83ec2c2f6df0 Mon Sep 17 00:00:00 2001
From: antirez 
Date: Fri, 26 Sep 2014 10:26:57 +0200
Subject: [PATCH 0054/2314] Small clarification in the cluster tutorial.

---
 topics/cluster-tutorial.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md
index 31709db394..7c9c0d7062 100644
--- a/topics/cluster-tutorial.md
+++ b/topics/cluster-tutorial.md
@@ -884,7 +884,7 @@ In both cases it is possible to migrate to Redis Cluster easily, however
 what is the most important detail is if multiple-keys operations are used
 by the application, and how. There are three different cases:
 
-1. 
Multiple keys operations, or transactions, or Lua scripts involving muliple keys, are not used. Keys are accessed independently (even if accessed via transactions or Lua scripts grouping multiple commands together). +1. Multiple keys operations, or transactions, or Lua scripts involving muliple keys, are not used. Keys are accessed independently (even if accessed via transactions or Lua scripts grouping multiple commands, about the same key, together). 2. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used by only with keys having the same **hash tag**, which means that the keys used together all have a `{...}` sub-string that happens to be identical. For example the following multiple keys operation is defined in the context of the same hash tag: `SUNION {user:1000}.foo {user:1000}.bar`. 3. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used with key names not having an explicit, or the same, hash tag. From 6353cd115f0e94e6f4f734f19b005ab3e6839b3a Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 26 Sep 2014 10:27:25 +0200 Subject: [PATCH 0055/2314] typo --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 7c9c0d7062..142c903d43 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -885,7 +885,7 @@ what is the most important detail is if multiple-keys operations are used by the application, and how. There are three different cases: 1. Multiple keys operations, or transactions, or Lua scripts involving muliple keys, are not used. Keys are accessed independently (even if accessed via transactions or Lua scripts grouping multiple commands, about the same key, together). -2. 
Multiple keys operations, transactions, or Lua scripts involving multiple keys are used by only with keys having the same **hash tag**, which means that the keys used together all have a `{...}` sub-string that happens to be identical. For example the following multiple keys operation is defined in the context of the same hash tag: `SUNION {user:1000}.foo {user:1000}.bar`. +2. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used but only with keys having the same **hash tag**, which means that the keys used together all have a `{...}` sub-string that happens to be identical. For example the following multiple keys operation is defined in the context of the same hash tag: `SUNION {user:1000}.foo {user:1000}.bar`. 3. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used with key names not having an explicit, or the same, hash tag. The third case is not handled by Redis Cluster: the application requires to From aaa049d5dc7d879c0c7a94f117fdf0352e90dc50 Mon Sep 17 00:00:00 2001 From: Maximilian Date: Fri, 26 Sep 2014 17:31:38 -0500 Subject: [PATCH 0056/2314] Fixed typo in introduction --- topics/introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/introduction.md b/topics/introduction.md index 5127d620f6..c09caa39aa 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -27,7 +27,7 @@ Other features include: * [Pub/Sub](/topics/pubsub) * [Lua scripting](/commands/eval) * [Keys with a limited time-to-live](/commands/expire) -* [LRU eviction fo keys](/topics/lru-cache) +* [LRU eviction of keys](/topics/lru-cache) * [Automatic failover](/topics/sentinel) You can use Redis from [most programming languages](/clients) out there. 
From 634d0e9eb9edf3cd266d9903ba1a325f6f640032 Mon Sep 17 00:00:00 2001 From: Michael Holroyd Date: Mon, 29 Sep 2014 11:34:42 -0400 Subject: [PATCH 0057/2314] fix minor typo --- topics/signals.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/signals.md b/topics/signals.md index 4306f71997..c7785c6086 100644 --- a/topics/signals.md +++ b/topics/signals.md @@ -41,7 +41,7 @@ The following follow signals are handled as a Redis crash: * SIGFPE * SIGILL -One one of these signals is trapped, Redis aborts any current operation and performs the following actions: +Once one of these signals is trapped, Redis aborts any current operation and performs the following actions: * A bug report is produced on the log file. This includes a stack trace, dump of registers, and information about the state of clients. * Since Redis 2.8 (currently a development version) a fast memory test is performed as a first check of the reliability of the crashing system. From fefb6d551c9f0e15394f8f12d4e8cc8bcb062c94 Mon Sep 17 00:00:00 2001 From: PowerKiKi Date: Fri, 3 Oct 2014 15:52:17 +0900 Subject: [PATCH 0058/2314] bullet list not rendered properly While rendered properly on github, it was not rendered properly on official site. Newline before a list seems to be mandatory --- topics/twitter-clone.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index e20f96b03c..22c50cbab8 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -241,6 +241,7 @@ authentication secrets to user IDs. HSET auths fea5e81ac8ca77622bed1c2132a021f9 1000 In order to authenticate a user we'll do these simple steps ( see the `login.php` file in the Retwis source code): + * Get the username and password via the login form * Check if the `username` field actually exists in the `users` Hash. * If it exists we have the user id, (i.e. 
1000) From a2f5471ead89b404bb15613dccfcade36d2f14db Mon Sep 17 00:00:00 2001 From: Erik Dubbelboer Date: Mon, 6 Oct 2014 14:13:34 +0000 Subject: [PATCH 0059/2314] Fix spelling mistake --- commands/pfcount.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/pfcount.md b/commands/pfcount.md index 35ac60ade7..47634e2135 100644 --- a/commands/pfcount.md +++ b/commands/pfcount.md @@ -31,7 +31,7 @@ PFCOUNT hll some-other-hll Performances --- -When `PFCOUNT` is called with a single key, performances as excellent even if +When `PFCOUNT` is called with a single key, performances are excellent even if in theory constant times to process a dense HyperLogLog are high. This is possible because the `PFCOUNT` uses caching in order to remember the cardinality previously computed, that rarely changes because most `PFADD` operations will From df77f1dc6d878604cacc1078d2867a0f753a73af Mon Sep 17 00:00:00 2001 From: Florian Weingarten Date: Fri, 3 Oct 2014 23:47:35 +0000 Subject: [PATCH 0060/2314] Flushdb time complexity --- commands/flushall.md | 3 +++ commands/flushdb.md | 3 +++ 2 files changed, 6 insertions(+) diff --git a/commands/flushall.md b/commands/flushall.md index 976412d99a..0fa4219dba 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -2,6 +2,9 @@ Delete all the keys of all the existing databases, not just the currently selected one. This command never fails. +The time-complexity for this operation is O(N), N being the number of +keys in the database. + @return @simple-string-reply diff --git a/commands/flushdb.md b/commands/flushdb.md index a35dbf8f6c..f41d0e31f3 100644 --- a/commands/flushdb.md +++ b/commands/flushdb.md @@ -1,6 +1,9 @@ Delete all the keys of the currently selected DB. This command never fails. +The time-complexity for this operation is O(N), N being the number of +keys in the database. 
+ @return @simple-string-reply From 7fa65d420e22e6ffc0d98c7c8a009dfa8032880e Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Tue, 7 Oct 2014 06:15:44 +0000 Subject: [PATCH 0061/2314] Fix wording in replication.md --- topics/replication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/replication.md b/topics/replication.md index 9e21be54a7..89f004b5d2 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -32,7 +32,7 @@ connections during this brief window. multiple slaves for read-only queries (for example, heavy `SORT` operations can be offloaded to slaves), or simply for data redundancy. -* It is possible to use replication to avoid the cost of writing the master +* It is possible to use replication to avoid the cost of having the master write the full dataset to disk: just configure your master redis.conf to avoid saving (just comment all the "save" directives), then connect a slave configured to save from time to time. From 41803303c7e95a4a9bb807df9b3c1a4b8caff796 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Tue, 7 Oct 2014 06:30:12 +0000 Subject: [PATCH 0062/2314] Replace "guys" with "people" --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index 06be4024c3..4569db7258 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -145,6 +145,6 @@ It means REmote DIctionary Server. ## Why did you started the Redis project? -Originally Redis was started in order to scale [LLOOGG][lloogg]. But after I got the basic server working I liked the idea to share the work with other guys, and Redis was turned into an open source project. +Originally Redis was started in order to scale [LLOOGG][lloogg]. But after I got the basic server working I liked the idea to share the work with other people, and Redis was turned into an open source project. 
[lloogg]: http://lloogg.com From dc2e937616f19d76ba357b19000e1d14d2fe2d13 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 27 May 2014 00:47:40 +0300 Subject: [PATCH 0063/2314] Update keys.md Added reference to 2.8's SCAN as an alternative to KEYS --- commands/keys.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/keys.md b/commands/keys.md index e15ddbcca4..9263ea2724 100644 --- a/commands/keys.md +++ b/commands/keys.md @@ -12,7 +12,7 @@ This command is intended for debugging and special operations, such as changing your keyspace layout. Don't use `KEYS` in your regular application code. If you're looking for a way to find keys in a subset of your keyspace, consider -using [sets][tdts]. +using `SCAN` or [sets][tdts]. [tdts]: /topics/data-types#sets From c6b5112b0f01e31efa108f3b662294dcb68323be Mon Sep 17 00:00:00 2001 From: Simon Bertrang Date: Fri, 13 Jun 2014 20:53:41 +0200 Subject: [PATCH 0064/2314] add link to Perl implementation --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index c2cffa773a..a3ee4e4c2d 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -27,6 +27,7 @@ already available, that can be used as a reference. * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). * [Redisson](https://github.com/mrniko/redisson) (Java implementation). +* [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). Safety and Liveness guarantees --- From 50b2e0744a6d814d63dfbe7c0f2e33ce1079bc00 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Tue, 7 Oct 2014 19:42:13 +0000 Subject: [PATCH 0065/2314] Fix typo. 
--- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index a5e66788f9..fac8b14489 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -17,7 +17,7 @@ by Redis, which will be covered separately in this tutorial: * Hashes, which are maps composed of fields associated with values. Both the field and the value are strings. This are very similary to Ruby or Python hashes. -* Bit arrays (or simply bitmaps): it is possible, usign special commands, to +* Bit arrays (or simply bitmaps): it is possible, using special commands, to handle String values like array of bits: you can set and clear individual bits, count all the bits set to 1, find the first set or unset bit, and so forth. From a5f12ed50499094c24b804b307eefafbd3c68603 Mon Sep 17 00:00:00 2001 From: David Bieber Date: Thu, 26 Jun 2014 12:07:22 -0400 Subject: [PATCH 0066/2314] fix numerous typos --- topics/data-types-intro.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 12efaca34f..5e6befb9b9 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -13,9 +13,9 @@ by Redis, which will be covered separately in this tutorial: * Sorted sets, similar to Sets but where every string element is associated to a floating number value, called *score*. The elements are always taken sorted by their score, so unlike Sets it is possible to retrieve range of elements - (for example you may aks: give me the top 10, or the bottom 10). + (for example you may ask: give me the top 10, or the bottom 10). * Hashes, which are maps composed of fields associated with values. Both the - field and the value are strings. This are very similary to Ruby or Python + field and the value are strings. This are very similar to Ruby or Python hashes. 
* Bit arrays (or simply bitmaps): it is possible, using special commands, to handle String values like array of bits: you can set and clear individual @@ -151,7 +151,7 @@ in order to interact with the space of keys, and thus, can be used with keys of any type. For example the `EXISTS` command returns 1 or 0 to signal if a given key -exists or not in the data base, while the `DEL` command deletes a key +exists or not in the database, while the `DEL` command deletes a key and associated value, whatever the value is. > set mykey hello @@ -376,7 +376,7 @@ An example will make it more clear: The above `LTRIM` command tells Redis to take just list elements from index 0 to 2, everything else will be discarded. This makes you able to mount -a very simple but useful patter, consisting in doing a List push operation +a very simple but useful pattern, consisting of doing a List push operation + a List trim operation together in order to add a new element and discard exceeding elements: @@ -459,7 +459,7 @@ Hashes. Basically we can summarize the behavior with three rules: -1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is crated before adding the element. +1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is created before adding the element. 2. When we remove elements from an aggregate data type, if the value remains empty, the key is automatically destroyed. 3. Calling a read-only command such as `LLEN` (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. @@ -651,7 +651,7 @@ ideal. So to start, we can make a copy of the set stored in the `deck` key, into the `game:1:deck` key. 
This is accomplished using `SUNIONSTORE`, which normally performs the -intersection between multiple sets, and stores the result into anther set. +intersection between multiple sets, and stores the result into another set. However the intersection of a single set, is itself, so I can copy my deck with: @@ -1039,4 +1039,4 @@ Learn more This tutorial is in no way complete and has covered just the basics of the API. Read the [command reference](/commands) to discover a lot more. -Thanks for reading, and have a good hacking with Redis! +Thanks for reading, and have a good time hacking with Redis! From 08b70fc5e00850820c7553870d7b80adb36c89e8 Mon Sep 17 00:00:00 2001 From: Artem Bezsmertnyi Date: Mon, 30 Jun 2014 21:17:11 +0300 Subject: [PATCH 0067/2314] Fix typos --- topics/persistence.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/persistence.md b/topics/persistence.md index 39f886e45d..dc626b4214 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -270,7 +270,7 @@ of money to spend we'll review the most interesting disaster recovery techniques that don't have too high costs. * Amazon S3 and other similar services are a good way for mounting your disaster recovery system. Simply transfer your daily or hourly RDB snapshot to S3 in an encrypted form. You can encrypt your data using `gpg -c` (in symmetric encryption mode). Make sure to store your password in many different safe places (for instance give a copy to the most important people of your organization). It is recommanded to use multiple storage services for improved data safety. -* Transfer your snapshots using SCP (part of SSH) to far servers. This is a fairly simple and safe route: get a small VPS in a place that is very far from you, install ssh there, and greate an ssh client key without passphrase, then make +* Transfer your snapshots using SCP (part of SSH) to far servers. 
This is a fairly simple and safe route: get a small VPS in a place that is very far from you, install ssh there, and generate an ssh client key without passphrase, then make add it in the authorized_keys file of your small VPS. You are ready to transfer backups in an automated fashion. Get at least two VPS in two different providers for best results. From 0263282bf8f3d5f9ef49a9b16a799bf1afe7d092 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 8 Oct 2014 14:43:50 +0200 Subject: [PATCH 0068/2314] Encryption page added. --- topics/encryption.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 topics/encryption.md diff --git a/topics/encryption.md b/topics/encryption.md new file mode 100644 index 0000000000..5819f08c61 --- /dev/null +++ b/topics/encryption.md @@ -0,0 +1,13 @@ +Redis Encryption +=== + +The idea of adding SSL support to Redis was proposed many times, however +currently we believe that given the small percentage of users requiring +SSL support, and the fact that each scenario tends to be different, to use +a different "tunneling" strategy can be better. We may change the idea in the +future, but currently a good solution that may be suitable for many use cases +is to use the following project: + +* [Spiped](http://www.tarsnap.com/spiped.html) is a utility for creating symmetrically encrypted and authenticated pipes between socket addresses, so that one may connect to one address (e.g., a UNIX socket on localhost) and transparently have a connection established to another address (e.g., a UNIX socket on a different system). + +The software is written in a similar spirit to Redis itself, it is a self-contained 4000 lines of C code utility that does a single thing well. From 3a924941a562f089f103807ecc0e7696018f577b Mon Sep 17 00:00:00 2001 From: Mustafa Altun Date: Mon, 11 Aug 2014 23:35:53 +0300 Subject: [PATCH 0069/2314] Wording and typo/grammar fixes in data-types-intro page. 
--- topics/data-types-intro.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 4adc7736da..d695601d2c 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -915,7 +915,7 @@ operations in group of bits, for example counting the number of set bits in a given range of bits (population counting). One of the biggest advantages of bitmaps is that they are sometimes an -extremely space saving way to store informations. For example in a system +extremely space saving way to store information. For example in a system where different users are represented by incremental user IDs, it is possible to remember a single bit information (for example if they want to receive or no the newsletter) of 4 billion of users using just 512 MB of memory. @@ -958,7 +958,7 @@ is a trivial example of `BITCOUNT` call: Common user cases for bitmaps are: * Real time analytics of all kinds. -* Storing space efficient but high performance boolean informations associated with object IDs. +* Storing space efficient but high performance boolean information associated with object IDs. For example imagine you want to know the longest streak of daily visits of your web site users. You start counting days starting from zero, that is the From d9a6ac55a9f7ff3c9e2fa26e9b5034b4ce099492 Mon Sep 17 00:00:00 2001 From: Mustafa Altun Date: Tue, 12 Aug 2014 00:03:25 +0300 Subject: [PATCH 0070/2314] Typo and grammar fixes in "Distributed locks with Redis" page. --- topics/distlock.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/topics/distlock.md b/topics/distlock.md index a3ee4e4c2d..0156c0439e 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -6,7 +6,7 @@ different processes require to operate with shared resources in a mutually exclusive way. 
There are a number of libraries and blog posts describing how to implement -a DLM (Distributed Lock Manager) with Redis, but every library use a different +a DLM (Distributed Lock Manager) with Redis, but every library uses a different approach, and many use a simple approach with lower guarantees compared to what can be achieved with slightly more complex designs. @@ -87,7 +87,7 @@ A simpler solution is to use a combination of unix time with microseconds resolu The time we use as the key time to live, is called the “lock validity time”. It is both the auto release time, and the time the client has in order to perform the operation required before another client may be able to acquire the lock again, without technically violating the mutual exclusion guarantee, which is only limited to a given window of time from the moment the lock is acquired. -So now we have a good way to acquire and release the lock. The system, reasoning about a non-distrubited system which is composed of a single instance, always available, is safe. Let’s extend the concept to a distributed system where we don’t have such guarantees. +So now we have a good way to acquire and release the lock. The system, reasoning about a non-distributed system which is composed of a single instance, always available, is safe. Let’s extend the concept to a distributed system where we don’t have such guarantees. The Redlock algorithm --- @@ -107,7 +107,7 @@ Is the algorithm asynchronous? The algorithm relies on the assumption that while there is no synchronized clock across the processes, still the local time in every process flows approximately at the same rate, with an error which is small compared to the auto-release time of the lock. This assumption closely resembles a real-world computer: every computer has a local clock and we can usually rely on different computers to have a clock drift which is small. 
-At this point we need to better specifiy our mutual exclusion rule: it is guaranteed only as long as the client holding the lock will terminate its work within the lock validity time (as obtained in step 3), minus some time (just a few milliseconds in order to compensate for clock drift between processes). +At this point we need to better specify our mutual exclusion rule: it is guaranteed only as long as the client holding the lock will terminate its work within the lock validity time (as obtained in step 3), minus some time (just a few milliseconds in order to compensate for clock drift between processes). For more information about similar systems requiring a bound *clock drift*, this paper is an interesting reference: [Leases: an efficient fault-tolerant mechanism for distributed file cache consistency](http://dl.acm.org/citation.cfm?id=74870). @@ -134,7 +134,7 @@ During the time the majority of keys are set, another client will not be able to However we want to also make sure that multiple clients trying to acquire the lock at the same time can’t simultaneously succeed. -If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be albe to lock N/2+1 instances at the same time (with “time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. 
+If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be able to lock N/2+1 instances at the same time (with “time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. Are you able to provide a formal proof of safety, point out to existing algorithms that are similar enough, or to find a bug? That would be very appreciated. @@ -178,7 +178,7 @@ become invalid and be automatically released. Using *delayed restarts* it is basically possible to achieve safety even without any kind of Redis persistence available, however note that this may translate into an availability penalty. For example if a majority of instances -crash, the system will become gobally unavailable for `TTL` (here globally means +crash, the system will become globally unavailable for `TTL` (here globally means that no resource at all will be lockable during this time). Making the algorithm more reliable: Extending the lock @@ -192,7 +192,7 @@ lock by sending a Lua script to all the instances that extends the TTL of the ke if the key exists and its value is still the random value the client assigned when the lock was acquired. -The client should only consider the lock re-acquired if it was albe to extend +The client should only consider the lock re-acquired if it was able to extend the lock into the majority of instances, and within the validity time (basically the algorithm to use is very similar to the one used when acquiring the lock). 
From 9c3e2af2bf63986e49671f720ef32eb21aa9a094 Mon Sep 17 00:00:00 2001 From: Francesc Campoy Date: Thu, 25 Sep 2014 17:04:50 -0700 Subject: [PATCH 0071/2314] Added missing 'and' --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index bf30f59bd1..e878734f27 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -223,7 +223,7 @@ The example above sets a key with the string value `100`, having an expire of ten seconds. Later the `TTL` command is called in order to check the remaining time to live for the key. -In order to set and check expires in milliseconds, check the `PEXPIRE` +In order to set and check expires in milliseconds, check the `PEXPIRE` and the `PTTL` commands, and the full list of `SET` options. From 15b5768ccf81d9520261c82f47ea7a1a2a8f77aa Mon Sep 17 00:00:00 2001 From: jacketzhong Date: Fri, 10 Oct 2014 09:36:45 +0800 Subject: [PATCH 0072/2314] add cpp implementation --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index 0156c0439e..bb7053103a 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -28,6 +28,7 @@ already available, that can be used as a reference. * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). * [Redisson](https://github.com/mrniko/redisson) (Java implementation). * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). +* [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (Cpp implementation). Safety and Liveness guarantees --- From 0d7912eb9c1e0799ab0b82fc1fb0f44e97505f85 Mon Sep 17 00:00:00 2001 From: Rob Bednark Date: Fri, 10 Oct 2014 15:54:27 -0700 Subject: [PATCH 0073/2314] Made grammatically changes: fixed grammatical errors, and rewrote some of the sentences to be more clear and fluid. 
--- topics/partitioning.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index c342ba6a5d..0abdb62006 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -9,28 +9,28 @@ Why partitioning is useful Partitioning in Redis serves two main goals: * It allows for much larger databases, using the sum of the memory of many computers. Without partitioning you are limited to the amount of memory a single computer can support. -* It allows to scale the computational power to multiple cores and multiple computers, and the network bandwidth to multiple computers and network adapters. +* It allows scaling the computational power to multiple cores and multiple computers, and the network bandwidth to multiple computers and network adapters. Partitioning basics --- There are different partitioning criteria. Imagine we have four Redis instances **R0**, **R1**, **R2**, **R3**, and many keys representing users like `user:1`, `user:2`, ... and so forth, we can find different ways to select in which instance we store a given key. In other words there are *different systems to map* a given key to a given Redis server. -One of the simplest way to perform partitioning is called **range partitioning**, and is accomplished by mapping ranges of objects into specific Redis instances. For example I could say, users from ID 0 to ID 10000 will go into instance **R0**, while users form ID 10001 to ID 20000 will go into instance **R1** and so forth. +One of the simplest ways to perform partitioning is with **range partitioning**, and is accomplished by mapping ranges of objects into specific Redis instances. For example, I could say users from ID 0 to ID 10000 will go into instance **R0**, while users form ID 10001 to ID 20000 will go into instance **R1** and so forth. 
-This systems works and is actually used in practice, however it has the disadvantage that there is to take a table mapping ranges to instances. This table needs to be managed and we need a table for every kind of object we have. Usually with Redis it is not a good idea. +This system works and is actually used in practice, however, it has the disadvantage of requiring a table that maps ranges to instances. This table needs to be managed and a table is needed for every kind of object, so therefore range partitioning in Redis is often undesirable because it is much more inefficient than other alternative partitioning approaches. -An alternative to to range partitioning is **hash partitioning**. This scheme works with any key, no need for a key in the form `object_name:` as is as simple as this: +An alternative to range partitioning is **hash partitioning**. This scheme works with any key, without requiring a key in the form `object_name:`, and is as simple as: -* Take the key name and use an hash function to turn it into a number. For instance I could use the `crc32` hash function. So if the key is `foobar` I do `crc32(foobar)` that will output something like 93024922. -* I use a modulo operation with this number in order to turn it into a number between 0 and 3, so that I can map this number to one of the four Redis instances I've. So `93024922 modulo 4` equals 2, so I know my key `foobar` should be stored into the **R2** instance. *Note: the modulo operation is just the rest of the division, usually it is implemented by the `%` operator in many programming languages.* +* Take the key name and use a hash function (e.g., the `crc32` hash function) to turn it into a number. For example, if the key is `foobar`, `crc32(foobar)` will output something like `93024922`. +* Use a modulo operation with this number in order to turn it into a number between 0 and 3, so that this number can be mapped to one of my four Redis instances. 
`93024922 modulo 4` equals `2`, so I know my key `foobar` should be stored into the **R2** instance. *Note: the modulo operation returns the remainder from a division operation, and is implemented with the `%` operator in many programming languages.* -There are many other ways to perform partitioning, but with this two examples you should get the idea. One advanced form of hash partitioning is called **consistent hashing** and is implemented by a few Redis clients and proxies. +There are many other ways to perform partitioning, but with these two examples you should get the idea. One advanced form of hash partitioning is called **consistent hashing** and is implemented by a few Redis clients and proxies. Different implementations of partitioning --- -Partitioning can be responsibility of different parts of a software stack. +Partitioning can be the responsibility of different parts of a software stack. * **Client side partitioning** means that the clients directly select the right node where to write or read a given key. Many Redis clients implement client side partitioning. * **Proxy assisted partitioning** means that our clients send requests to a proxy that is able to speak the Redis protocol, instead of sending requests directly to the right Redis instance. The proxy will make sure to forward our request to the right Redis instance accordingly to the configured partitioning schema, and will send the replies back to the client. The Redis and Memcached proxy [Twemproxy](https://github.com/twitter/twemproxy) implements proxy assisted partitioning. @@ -50,14 +50,14 @@ Some features of Redis don't play very well with partitioning: Data store or cache? --- -Partitioning when using Redis as a data store or cache is conceptually the same, however there is a huge difference. 
While when Redis is used as a data store you need to be sure that a given key always maps to the same instance, when Redis is used as a cache if a given node is unavailable it is not a big problem if we start using a different node, altering the key-instance map as we wish to improve the *availability* of the system (that is, the ability of the system to reply to our queries). +Although partitioning in Redis is conceptually the same whether using Redis as a data store or as a cache, there is a significant limitation when using it as a data store. When Redis is used as a data store, a given key must always map to the same Redis instance. When Redis is used as a cache, if a given node is unavailable it is not a big problem if a different node is used, altering the key-instance map as we wish to improve the *availability* of the system (that is, the ability of the system to reply to our queries). Consistent hashing implementations are often able to switch to other nodes if the preferred node for a given key is not available. Similarly if you add a new node, part of the new keys will start to be stored on the new node. The main concept here is the following: * If Redis is used as a cache **scaling up and down** using consistent hashing is easy. -* If Redis is used as a store, **we need to take the map between keys and nodes fixed, and a fixed number of nodes**. Otherwise we need a system that is able to rebalance keys between nodes when we add or remove nodes, and currently only Redis Cluster is able to do this, but Redis Cluster is currently in beta, and not yet considered production ready. +* If Redis is used as a store, **a fixed keys-to-nodes map is used, so the number of nodes must be fixed and cannot vary**. Otherwise, a system is needed that is able to rebalance keys between nodes when nodes are added or removed, and currently only Redis Cluster is able to do this, but Redis Cluster is currently in beta, and not yet considered production-ready. 
Presharding --- From 9195d16ba866996acbbbb0c1296f6154695dd72d Mon Sep 17 00:00:00 2001 From: Paulo Lopes Date: Mon, 13 Oct 2014 15:49:45 +0200 Subject: [PATCH 0074/2314] add vert.x mod-redis to the list --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 48dcddc766..bf3966cb76 100644 --- a/clients.json +++ b/clients.json @@ -249,6 +249,14 @@ "authors": [] }, + { + "name": "mod-redis-io", + "language": "Java", + "repository": "https://github.com/vert-x/mod-redis", + "description": "Official asynchronous redis.io bus module for Vert.x", + "authors": ["pmlopes"], + }, + { "name": "redis-lua", "language": "Lua", From 9964604d9b7cf70c32fe414944bbc61fee0f7080 Mon Sep 17 00:00:00 2001 From: Paulo Lopes Date: Mon, 13 Oct 2014 15:52:41 +0200 Subject: [PATCH 0075/2314] drop -io from mod to match vert.x module name --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index bf3966cb76..030ed69e41 100644 --- a/clients.json +++ b/clients.json @@ -250,7 +250,7 @@ }, { - "name": "mod-redis-io", + "name": "mod-redis", "language": "Java", "repository": "https://github.com/vert-x/mod-redis", "description": "Official asynchronous redis.io bus module for Vert.x", From ab38fa0adaaf6fd16a2b03624310e12ae82ef4f7 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Mon, 13 Oct 2014 14:42:41 +0000 Subject: [PATCH 0076/2314] Fix JSON formatting --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 4bcc68fee7..07369a133b 100644 --- a/clients.json +++ b/clients.json @@ -517,7 +517,7 @@ "name": "Retcl", "language": "Tcl", "repository": "https://github.com/gahr/retcl", - "description": "Retcl is an asynchronous, event-driven Redis client library implemented as a single-file Tcl module." 
+ "description": "Retcl is an asynchronous, event-driven Redis client library implemented as a single-file Tcl module.", "authors": ["gahrgahr"] }, From 4499f903af949f5d1a737323fbc5734e839dec01 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Mon, 13 Oct 2014 14:43:16 +0000 Subject: [PATCH 0077/2314] Add txredisapi --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 07369a133b..fae4ec25a8 100644 --- a/clients.json +++ b/clients.json @@ -427,6 +427,15 @@ "authors": ["dio_rian"] }, + { + "name": "txredisapi", + "language": "Python", + "url": "https://github.com/fiorix/txredisapi", + "description": "Full featured, non-blocking client for Twisted.", + "authors": ["fiorix"], + "active": true + }, + { "name": "desir", "language": "Python", From b6c3fc2342458929fa6985445badf47b3d0fc835 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Wed, 15 Oct 2014 18:10:39 +0000 Subject: [PATCH 0078/2314] Fix typo (tip from @rochoa) --- topics/latency.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency.md b/topics/latency.md index 46704ea028..06504ef24d 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -532,7 +532,7 @@ It is important to use it only as *last resort* when there is no way to track th This is how this feature works: -* The user enables the softare watchdog using the `CONFIG SET` command. +* The user enables the software watchdog using the `CONFIG SET` command. * Redis starts monitoring itself constantly. * If Redis detects that the server is blocked into some operation that is not returning fast enough, and that may be the source of the latency issue, a low level report about where the server is blocked is dumped on the log file. * The user contacts the developers writing a message in the Redis Google Group, including the watchdog report in the message. 
From 6aaee8cabd0ea2b6ef07befa32653fb43fd66857 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Carlos=20Nieto?= Date: Fri, 17 Oct 2014 10:11:59 -0500 Subject: [PATCH 0079/2314] Updating description for the gosexy/redis package. --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index fae4ec25a8..f02cd3134e 100644 --- a/clients.json +++ b/clients.json @@ -142,7 +142,7 @@ "language": "Go", "repository": "https://github.com/gosexy/redis", "url": "https://menteslibres.net/gosexy/redis", - "description": "A Go client for redis built on top of the hiredis C client. Supports non-blocking connections and channel-based subscriptions.", + "description": "Redis client library for Go that maps the full redis command list into equivalent Go functions.", "authors": ["xiam"], "active": true }, From 78a885ceb4e6bc4f836a26382d2688cbb1d14376 Mon Sep 17 00:00:00 2001 From: Armin Ronacher Date: Sun, 26 Oct 2014 02:10:37 +0100 Subject: [PATCH 0080/2314] Recommend redis-rs and reorder the rust libraries. redis-rs is currently the only library that works with any recent rust version or is maintained. 
--- clients.json | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/clients.json b/clients.json index fae4ec25a8..342ea5c28c 100644 --- a/clients.json +++ b/clients.json @@ -778,6 +778,16 @@ "description": "A Redis client written with the Akka IO package introduced in Akka 2.2.", "authors": ["chrisdinn"] }, + + { + "name": "redis-rs", + "language": "Rust", + "repository": "https://github.com/mitsuhiko/redis-rs", + "description": "A high and low level client library for Redis tracking Rust nightly.", + "authors": ["mitsuhiko"], + "active": true, + "recommended": true + }, { "name": "rust-redis", @@ -788,15 +798,6 @@ "active": true }, - { - "name": "redis-rs", - "language": "Rust", - "repository": "https://github.com/mitsuhiko/redis-rs", - "description": "A fairly high level client library for Redis.", - "authors": ["mitsuhiko"], - "active": true - }, - { "name": "redic", "language": "Ruby", From 2bc1423e09994bd4016dea7184a31cf90ef68da8 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 29 Oct 2014 17:07:32 +0100 Subject: [PATCH 0081/2314] More info about replication. This adds info about diskless replication, and safety of runnign a mastesr with persistence turned off. --- topics/replication.md | 52 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/topics/replication.md b/topics/replication.md index 89f004b5d2..3558bc7154 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -32,10 +32,28 @@ connections during this brief window. multiple slaves for read-only queries (for example, heavy `SORT` operations can be offloaded to slaves), or simply for data redundancy. -* It is possible to use replication to avoid the cost of having the master -write the full dataset to disk: just configure your master redis.conf to avoid -saving (just comment all the "save" directives), then connect a slave -configured to save from time to time. 
+* It is possible to use replication to avoid the cost of having the master write the full dataset to disk: just configure your master `redis.conf` to avoid saving (just comment all the "save" directives), then connect a slave configured to save from time to time. However in this setup make sure masters don't restart automatically (please read the next section for more information). + +Safety of replication when master has persistence turned off +--- + +In setups where Redis replication is used, it is strongly advised to have +persistence turned on in the master, or when this is not possible, for example +because of latency concerns, instances should be configured to **avoid restarting +automatically**. + +To better understand why masters with persistence turned off configured to +auto restart are dangerous, check the following failure mode where data +is wiped from the master and all its slaves: + +1. We have a setup with node A acting as master, with persistence turned down, and nodes B and C replicating from node A. +2. A crashes, however it has some auto-restart system, that restarts the process. However since persistence is turned off, the node restarts with an empty data set. +3. Nodes B and C will replicate from A, which is empty, so they'll effectively destroy their copy of the data. + +When Redis Sentinel is used for high availability, also turning off persistency +on the master, together with auto restart of the process, is dangerous. For example the master can restart fast enough for Sentinel to don't detect a failure, so that the failure mode described above happens. + +Every time data safety is important, and replication is used with master configured without persistence, auto restart of instances should be disabled. How Redis replication works --- @@ -85,6 +103,19 @@ while the old implementation uses the `SYNC` command. Note that a Redis 2.8 slave is able to detect if the server it is talking with does not support `PSYNC`, and will use `SYNC` instead. 
+Diskless replication +--- + +Normally a full resynchronization requires to create an RDB file on disk, +then reload the same RDB from disk in order to feed the slaves with the data. + +With slow disks this can be a very stressing operation for the master. +Redis version 2.8.18 will be the first version to have experimental support +for diskless replication. In this setup the child process directly sends the +RDB over the wire to slaves, without using the disk as intermediate storage. + +The feature is currently considered experimental. + Configuration --- @@ -101,6 +132,12 @@ There are also a few parameters for tuning the replication backlog taken in memory by the master to perform the partial resynchronization. See the example `redis.conf` shipped with the Redis distribution for more information. +Diskless replication can be enabled using the `repl-diskless-sync` configuration +parameter. The delay to start the transfer in order to wait more slaves to +arrive after the first one, is controlled by the `repl-diskless-sync-delay` +parameter. Please refer to the example `redis.conf` file in the Redis distribution +for more details. + Read-only slave --- @@ -112,10 +149,9 @@ Read-only slaves will reject all write commands, so that it is not possible to w You may wonder why it is possible to revert the read-only setting and have slave instances that can be target of write operations. While those writes will be discarded if the slave and the master -resynchronize or if the slave is restarted, there's a legitimate -use case for storing ephemeral data in writable slaves. For -instance, clients may take information about master reachability -to coordinate a failover strategy. +resynchronize or if the slave is restarted, there are a few legitimate +use case for storing ephemeral data in writable slaves. However in the future +it is possible that this feature will be dropped. 
Setting a slave to authenticate to a master --- From 487c2fb523a30252fd6399130311dba39461ec80 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 30 Oct 2014 12:34:51 +0100 Subject: [PATCH 0082/2314] Update redis-py-cluster in cluster-tutorial --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 142c903d43..718148d9fc 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -282,7 +282,7 @@ client libraries implementations. I'm aware of the following implementations: * [redis-rb-cluster](http://github.com/antirez/redis-rb-cluster) is a Ruby implementation written by me (@antirez) as a reference for other languages. It is a simple wrapper around the original redis-rb, implementing the minimal semantics to talk with the cluster efficiently. -* [redis-py-cluster](https://github.com/Grokzen/redis-py-cluster) appears to be a port of redis-rb-cluster to Python. Not recently updated (last commit 6 months ago) however it may be a starting point. +* [redis-py-cluster](https://github.com/Grokzen/redis-py-cluster) A port of redis-rb-cluster to Python. Supports majority of *redis-py* functionality. Is in active development. * The popular [Predis](https://github.com/nrk/predis) has support for Redis Cluster, the support was recently updated and is in active development. * The most used Java client, [Jedis](https://github.com/xetorthio/jedis) recently added support for Redis Cluster, see the *Jedis Cluster* section in the project README. * The `redis-cli` utility in the unstable branch of the Redis repository at Github implements a very basic cluster support when started with the `-c` switch. From cade7a703422008ca11e484eca34c304ef76443b Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 31 Oct 2014 12:32:43 +0100 Subject: [PATCH 0083/2314] Added EC2 specific info to admin page. 
--- topics/admin.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/topics/admin.md b/topics/admin.md index 55cc35a093..2f60ba5f0a 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -10,11 +10,19 @@ Redis setup hints + We suggest deploying Redis using the **Linux operating system**. Redis is also tested heavily on osx, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are working. + Make sure to set the Linux kernel **overcommit memory setting to 1**. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot or run the command `sysctl vm.overcommit_memory=1` for this to take effect immediately. + Make sure to **setup some swap** in your system (we suggest as much as swap as memory). If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash for out of memory or the Linux kernel OOM killer will kill the Redis process. ++ Set an explicit `maxmemory` option limit in your instance in order to make sure that the instance will report errors instead of failing when the system memory limit is near to be reached. + If you are using Redis in a very write-heavy application, while saving an RDB file on disk or rewriting the AOF log **Redis may use up to 2 times the memory normally used**. The additional memory used is proportional to the number of memory pages modified by writes during the saving process, so it is often proportional to the number of keys (or aggregate types items) touched during this time. Make sure to size your memory accordingly. -+ Even if you have persistence disabled, Redis will need to perform RDB saves if you use replication. -+ The use of Redis persistence with **EC2 EBS volumes is discouraged** since EBS performance is usually poor. Use ephemeral storage to persist and then move your persistence files to EBS when possible. 
-+ If you are deploying using a virtual machine that uses the **Xen hypervisor you may experience slow fork() times**. This may block Redis from a few milliseconds up to a few seconds depending on the dataset size. Check the [latency page](/topics/latency) for more information. This problem is not common to other hypervisors. + Use `daemonize no` when run under daemontools. ++ Even if you have persistence disabled, Redis will need to perform RDB saves if you use replication, unless you use the new diskless replication feature, which is currently experimental. ++ If you are using replication, make sure that either your master has persistence enabled, or that it does not automatically restarts on crashes: slaves will try to be an exact copy of the master, so if a master restarts with an empty data set, slaves will be wiped as well. + +Running Redis on EC2 +-------------------- + ++ Use HVM based instances, not PV based instances. ++ Don't use old instances families, for example: use m3.medium with HVM instead of m1.medium with PV. ++ The use of Redis persistence with **EC2 EBS volumes** needs to be handled with care since sometimes EBS volumes have high latency characteristics. ++ You may want to try the new **diskless replication** (currently experimetnal) if you have issues when slaves are synchronizing with the master. Upgrading or restarting a Redis instance without downtime ------------------------------------------------------- @@ -22,7 +30,7 @@ Upgrading or restarting a Redis instance without downtime Redis is designed to be a very long running process in your server. For instance many configuration options can be modified without any kind of restart using the [CONFIG SET command](/commands/config-set). -Starting from Redis 2.2 it is even possible to switch from AOF to RDB snapshots persistence or the other way around without restarting Redis. Check the output of the 'CONFIG GET *' command for more information. 
+Starting from Redis 2.2 it is even possible to switch from AOF to RDB snapshots persistence or the other way around without restarting Redis. Check the output of the `CONFIG GET *` command for more information. However from time to time a restart is mandatory, for instance in order to upgrade the Redis process to a newer version, or when you need to modify some configuration parameter that is currently not supported by the CONFIG command. From 62c8c92ac03a36785d5a3abe7ccbad030fdfa446 Mon Sep 17 00:00:00 2001 From: Lennie Date: Sat, 1 Nov 2014 12:38:30 +0100 Subject: [PATCH 0084/2314] sentinel.md typo fix I believe the intended text was 'there are 3 nodes' not 'there are there nodes' --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 82873ed9b9..5661586f83 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -304,7 +304,7 @@ However in a real-world system using Sentinel there are three different players: In order to define the behavior of the system we have to consider all three. -The following is a simple network where there are there nodes, each running +The following is a simple network where there are 3 nodes, each running a Redis instance, and a Sentinel instance: +-------------+ From 5651219bc133800b38055dede6c215a2d785458a Mon Sep 17 00:00:00 2001 From: Gaurish Sharma Date: Tue, 4 Nov 2014 10:49:45 +0530 Subject: [PATCH 0085/2314] Typo Fix :scissors: --- topics/persistence.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/persistence.md b/topics/persistence.md index dc626b4214..b3965c0e47 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -269,7 +269,7 @@ Since many Redis users are in the startup scene and thus don't have plenty of money to spend we'll review the most interesting disaster recovery techniques that don't have too high costs. 
-* Amazon S3 and other similar services are a good way for mounting your disaster recovery system. Simply transfer your daily or hourly RDB snapshot to S3 in an encrypted form. You can encrypt your data using `gpg -c` (in symmetric encryption mode). Make sure to store your password in many different safe places (for instance give a copy to the most important people of your organization). It is recommanded to use multiple storage services for improved data safety. +* Amazon S3 and other similar services are a good way for mounting your disaster recovery system. Simply transfer your daily or hourly RDB snapshot to S3 in an encrypted form. You can encrypt your data using `gpg -c` (in symmetric encryption mode). Make sure to store your password in many different safe places (for instance give a copy to the most important people of your organization). It is recommended to use multiple storage services for improved data safety. * Transfer your snapshots using SCP (part of SSH) to far servers. This is a fairly simple and safe route: get a small VPS in a place that is very far from you, install ssh there, and generate an ssh client key without passphrase, then make add it in the authorized_keys file of your small VPS. You are ready to transfer backups in an automated fashion. 
Get at least two VPS in two different providers From d944d6114ee80d480fc5d0f0f7ea67be4a04a1d2 Mon Sep 17 00:00:00 2001 From: Alejandro Lazaro Date: Tue, 4 Nov 2014 17:07:58 -0300 Subject: [PATCH 0086/2314] Fix a typo in commands.json --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 98886e0708..de22e6c6e6 100644 --- a/commands.json +++ b/commands.json @@ -247,7 +247,7 @@ }, "COMMAND GETKEYS": { "summary": "Extract keys given a full Redis command", - "complexity": "O(N) where N is the number of arugments to the command", + "complexity": "O(N) where N is the number of arguments to the command", "since": "2.8.13", "group": "server" }, From 85588b658a4383afc116f9fdfab8e61ecbed61bb Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 7 Nov 2014 16:29:06 +0100 Subject: [PATCH 0087/2314] Latency page improved. --- topics/admin.md | 1 + topics/latency.md | 51 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 48 insertions(+), 4 deletions(-) diff --git a/topics/admin.md b/topics/admin.md index 2f60ba5f0a..1f8ce1c8ff 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -9,6 +9,7 @@ Redis setup hints + We suggest deploying Redis using the **Linux operating system**. Redis is also tested heavily on osx, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are working. + Make sure to set the Linux kernel **overcommit memory setting to 1**. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot or run the command `sysctl vm.overcommit_memory=1` for this to take effect immediately. +* Make sure to disable Linux kernel feature *transparent huge pages*, it will affect greatly both memory usage and latency in a negative way. This is accomplished with the following command: `echo never > sys/kernel/mm/transparent_hugepage/enabled`. 
+ Make sure to **setup some swap** in your system (we suggest as much as swap as memory). If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash for out of memory or the Linux kernel OOM killer will kill the Redis process. + Set an explicit `maxmemory` option limit in your instance in order to make sure that the instance will report errors instead of failing when the system memory limit is near to be reached. + If you are using Redis in a very write-heavy application, while saving an RDB file on disk or rewriting the AOF log **Redis may use up to 2 times the memory normally used**. The additional memory used is proportional to the number of memory pages modified by writes during the saving process, so it is often proportional to the number of keys (or aggregate types items) touched during this time. Make sure to size your memory accordingly. diff --git a/topics/latency.md b/topics/latency.md index 06504ef24d..d1085eb2ad 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -9,6 +9,30 @@ issues a command and the time the reply to the command is received by the client. Usually Redis processing time is extremely low, in the sub microsecond range, but there are certain conditions leading to higher latency figures. +I've little time, give me the checklist +--- + +The following documentation is very important in order to run Redis in +a low latency fashion. However I understand that we are busy people, so +let's start with a quick checklist. If you fail following this steps, please +return here to read the full documentation. + +1. Make sure you are not running slow commands that are blocking the server. Use the Redis [Slow Log feature](/commands/slowlog) to check this. +2. For EC2 users, make sure you use HVM based modern EC2 instances, like m3.medium. Otherwise fork() is too slow. +3. Transparent huge pages must be disabled from your kernel. 
Use `echo never > sys/kernel/mm/transparent_hugepage/enabled` to disable them, and restart your Redis process. +4. If you are using a virtual machine, it is possible that you have an intrinsic latency that has nothing to do with Redis. Check the minimum latency you can expect from your runtime environment using `./redis-cli --intrinsic-latency 100`. +5. Enable and use the [Latency monitor](/topics/latency-monitor) feature of Redis in order to get a human readable description of the latency events and causes in your Redis instance. + +In general, use the following table for durability VS latency/performance tradeoffs, ordered from stronger safety to better latency. + +1. AOF + fsync always: this is very slow, you should use it only if you know what you are doing. +2. AOF + fsync every second: this is a good compromise. +3. AOF + fsync every second + no-appendfsync-on-rewrite option set to yes: this is as the above, but avoids to fsync during rewrites to lower the disk pressure. +4. AOF + fsync never. Fsyncing is up to the kernel in this setup, even less disk pressure and risk of latency spikes. +5. RDB. Here you have a vast spectrum of tradeoffs depending on the save triggers you configure. + +And now for people with 15 minutes to spend, the details... + Measuring latency ----------------- @@ -204,19 +228,38 @@ Fork time in different systems ------------------------------ Modern hardware is pretty fast to copy the page table, but Xen is not. -The problem with Xen is not virtualization-specific, but Xen-specific. For instance -using VMware or Virtual Box does not result into slow fork time. +The problem with Xen is not virtualization-specific, but Xen-specific. For instance using VMware or Virtual Box does not result into slow fork time. The following is a table that compares fork time for different Redis instance size. Data is obtained performing a BGSAVE and looking at the `latest_fork_usec` filed in the `INFO` command output. 
+However the good news is that **new types of EC2 HVM based instances are much +better with fork times**, almost on pair with physical servers, so for example +using m3.medium (or better) instances will provide good results. + * **Linux beefy VM on VMware** 6.0GB RSS forked in 77 milliseconds (12.8 milliseconds per GB). * **Linux running on physical machine (Unknown HW)** 6.1GB RSS forked in 80 milliseconds (13.1 milliseconds per GB) * **Linux running on physical machine (Xeon @ 2.27Ghz)** 6.9GB RSS forked into 62 milliseconds (9 milliseconds per GB). * **Linux VM on 6sync (KVM)** 360 MB RSS forked in 8.2 milliseconds (23.3 millisecond per GB). -* **Linux VM on EC2 (Xen)** 6.1GB RSS forked in 1460 milliseconds (239.3 milliseconds per GB). +* **Linux VM on EC2, old instance types (Xen)** 6.1GB RSS forked in 1460 milliseconds (239.3 milliseconds per GB). +* **Linux VM on EC2, new instance types (Xen)** 1GB RSS forked in 10 milliseconds (10 milliseconds per GB). * **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 millisecodns (424 milliseconds per GB). -As you can see a VM running on Xen has a performance hit that is between one order to two orders of magnitude. We believe this is a severe problem with Xen and we hope it will be addressed ASAP. +As you can see certanin VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. + +Latency induced by transparent huge pages +----------------------------------------- + +Unfortunately when a Linux kernel has transparent huge pages enabled, Redis +incurs to a big latency penality after the `fork` call is used in order to +persist on disk. Huge pages are the cause of the follwing issue: + +1. Fork is called, two processes with shared huge pages are crated. +2. 
In a busy instance, a few event loops runs will cause commands to target a few thousand of pages, causing the copy on write of almost the whole process memory. +3. This will result in big latency and big memory usage. + +Make sure to **disable transparent huge pages** using the following command: + + echo never > sys/kernel/mm/transparent_hugepage/enabled Latency induced by swapping (operating system paging) ----------------------------------------------------- From a74da778e17c6ce1e54805856fbdf0cf3595d8b0 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 7 Nov 2014 16:30:00 +0100 Subject: [PATCH 0088/2314] Huge pages section removed since they are evil anyway. --- topics/latency.md | 74 ----------------------------------------------- 1 file changed, 74 deletions(-) diff --git a/topics/latency.md b/topics/latency.md index d1085eb2ad..03cec33a70 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -614,77 +614,3 @@ Note: in the example the **DEBUG SLEEP** command was used in order to block the If you happen to collect multiple watchdog stack traces you are encouraged to send everything to the Redis Google Group: the more traces we obtain, the simpler it will be to understand what the problem with your instance is. -APPENDIX A: Experimenting with huge pages ------------------------------------------ - -Latency introduced by fork can be mitigated using huge pages at the cost of a bigger memory usage during persistence. The following appeindex describe in details this feature as implemented in the Linux kernel. - -Some CPUs can use different page size though. AMD and Intel CPUs can support -2 MB page size if needed. These pages are nicknamed *huge pages*. Some -operating systems can optimize page size in real time, transparently -aggregating small pages into huge pages on the fly. - -On Linux, explicit huge pages management has been introduced in 2.6.16, and -implicit transparent huge pages are available starting in 2.6.38. 
If you -run recent Linux distributions (for example RH 6 or derivatives), transparent -huge pages can be activated, and you can use a vanilla Redis version with them. - -This is the preferred way to experiment/use with huge pages on Linux. - -Now, if you run older distributions (RH 5, SLES 10-11, or derivatives), and -not afraid of a few hacks, Redis requires to be patched in order to support -huge pages. - -The first step would be to read [Mel Gorman's primer on huge pages](http://lwn.net/Articles/374424/) - -There are currently two ways to patch Redis to support huge pages. - -+ For Redis 2.4, the embedded jemalloc allocator must be patched. -[patch](https://gist.github.com/1171054) by Pieter Noordhuis. -Note this patch relies on the anonymous mmap huge page support, -only available starting 2.6.32, so this method cannot be used for older -distributions (RH 5, SLES 10, and derivatives). - -+ For Redis 2.2, or 2.4 with the libc allocator, Redis makefile -must be altered to link Redis with -[the libhugetlbfs library](http://libhugetlbfs.sourceforge.net/). -It is a straightforward [change](https://gist.github.com/1240452) - -Then, the system must be configured to support huge pages. - -The following command allocates and makes N huge pages available: - - $ sudo sysctl -w vm.nr_hugepages= - -The following command mounts the huge page filesystem: - - $ sudo mount -t hugetlbfs none /mnt/hugetlbfs - -In all cases, once Redis is running with huge pages (transparent or -not), the following benefits are expected: - -+ The latency due to the fork operations is dramatically reduced. - This is mostly useful for very large instances, and especially - on a VM. -+ Redis is faster due to the fact the translation look-aside buffer - (TLB) of the CPU is more efficient to cache page table entries - (i.e. the hit ratio is better). Do not expect miracle, it is only - a few percent gain at most. 
-+ Redis memory cannot be swapped out anymore, which is interesting - to avoid outstanding latencies due to virtual memory. - -Unfortunately, and on top of the extra operational complexity, -there is also a significant drawback of running Redis with -huge pages. The COW mechanism granularity is the page. With -2 MB pages, the probability a page is modified during a background -save operation is 512 times higher than with 4 KB pages. The actual -memory required for a background save therefore increases a lot, -especially if the write traffic is truly random, with poor locality. -With huge pages, using twice the memory while saving is not anymore -a theoretical incident. It really happens. - -The result of a complete benchmark can be found -[here](https://gist.github.com/1272254). - - - From c07557a9969e7dc15abc583ce8ce6a1ab0b71af5 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 7 Nov 2014 16:32:15 +0100 Subject: [PATCH 0089/2314] this -> these. --- topics/latency.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency.md b/topics/latency.md index 03cec33a70..99d6de3385 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -14,7 +14,7 @@ I've little time, give me the checklist The following documentation is very important in order to run Redis in a low latency fashion. However I understand that we are busy people, so -let's start with a quick checklist. If you fail following this steps, please +let's start with a quick checklist. If you fail following these steps, please return here to read the full documentation. 1. Make sure you are not running slow commands that are blocking the server. Use the Redis [Slow Log feature](/commands/slowlog) to check this. From 8283d44b290522f88f97aa6e73c7fef8bbb33d92 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 7 Nov 2014 16:40:32 +0100 Subject: [PATCH 0090/2314] In computing a single slash makes difference. 
--- topics/latency.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency.md b/topics/latency.md index 99d6de3385..b881f79359 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -19,7 +19,7 @@ return here to read the full documentation. 1. Make sure you are not running slow commands that are blocking the server. Use the Redis [Slow Log feature](/commands/slowlog) to check this. 2. For EC2 users, make sure you use HVM based modern EC2 instances, like m3.medium. Otherwise fork() is too slow. -3. Transparent huge pages must be disabled from your kernel. Use `echo never > sys/kernel/mm/transparent_hugepage/enabled` to disable them, and restart your Redis process. +3. Transparent huge pages must be disabled from your kernel. Use `echo never > /sys/kernel/mm/transparent_hugepage/enabled` to disable them, and restart your Redis process. 4. If you are using a virtual machine, it is possible that you have an intrinsic latency that has nothing to do with Redis. Check the minimum latency you can expect from your runtime environment using `./redis-cli --intrinsic-latency 100`. 5. Enable and use the [Latency monitor](/topics/latency-monitor) feature of Redis in order to get a human readable description of the latency events and causes in your Redis instance. From d4b94ed56824fc4ab51926421f37d1baa499d1ac Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 7 Nov 2014 16:41:22 +0100 Subject: [PATCH 0091/2314] Another slash. --- topics/latency.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency.md b/topics/latency.md index b881f79359..9bb456a523 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -259,7 +259,7 @@ persist on disk. 
Huge pages are the cause of the follwing issue: Make sure to **disable transparent huge pages** using the following command: - echo never > sys/kernel/mm/transparent_hugepage/enabled + echo never > /sys/kernel/mm/transparent_hugepage/enabled Latency induced by swapping (operating system paging) ----------------------------------------------------- From 075da40a7eaafc3a0cfd3f8f8312c3baff9628bf Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Tue, 11 Nov 2014 20:41:58 -0500 Subject: [PATCH 0092/2314] Fix command name in sunionstore.md Related to https://github.com/antirez/redis-doc/pull/194#discussion_r20196196 --- commands/sunionstore.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/sunionstore.md b/commands/sunionstore.md index 74df06071f..716caf13f0 100644 --- a/commands/sunionstore.md +++ b/commands/sunionstore.md @@ -16,6 +16,6 @@ SADD key1 "c" SADD key2 "c" SADD key2 "d" SADD key2 "e" -SINTERSTORE key key1 key2 +SUNIONSTORE key key1 key2 SMEMBERS key ``` From 5743499f41a0b929fba0a8cfe74f2dd78f85d694 Mon Sep 17 00:00:00 2001 From: Miura Takuma Date: Thu, 13 Nov 2014 16:47:04 +0900 Subject: [PATCH 0093/2314] fixed; when a key expired, EXPIRED fires, not DEL. --- topics/notifications.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/notifications.md b/topics/notifications.md index 7cee600f62..05662eec6f 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -96,7 +96,7 @@ Different commands generate different kind of events according to the following * `DEL` generates a `del` event for every deleted key. * `RENAME` generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key. -* `EXPIRE` generates an `expire` event when an expire is set to the key, or a `del` event every time setting an expire results into the key being deleted (see `EXPIRE` documentation for more info). 
+* `EXPIRE` generates an `expire` event when an expire is set to the key, or a `expired` event every time setting an expire results into the key being deleted (see `EXPIRE` documentation for more info). * `SORT` generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition. * `SET` and all its variants (`SETEX`, `SETNX`,`GETSET`) generate `set` events. However `SETEX` will also generate an `expire` events. * `MSET` generates a separated `set` event for every key. From 5bf19f984afe1624f34b8cdda451cd68b9371538 Mon Sep 17 00:00:00 2001 From: Alex Nekipelov Date: Thu, 13 Nov 2014 12:12:17 +0300 Subject: [PATCH 0094/2314] Added C++/Boost Asio client --- clients.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index fae4ec25a8..e5e34dd99d 100644 --- a/clients.json +++ b/clients.json @@ -850,5 +850,15 @@ "description": "Varnish Cache module using the synchronous hiredis library API to access Redis servers from VCL.", "authors": ["carlosabalde"], "active": true - } + }, + + { + "name": "redisclient", + "language": "C++", + "repository": "https://github.com/nekipelov/redisclient", + "description": "A C++ asynchronous client based on boost::asio", + "authors": [ "nekipelov" ], + "active": true + } + ] From 6dec9567131b38909ad3b0831c8bfa334ac07f0c Mon Sep 17 00:00:00 2001 From: Guido Iaquinti Date: Thu, 13 Nov 2014 12:30:08 +0000 Subject: [PATCH 0095/2314] Fix typos in topics/latency.md --- topics/latency.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/latency.md b/topics/latency.md index 9bb456a523..d0fe4b5d0e 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -239,10 +239,10 @@ using m3.medium (or better) instances will provide good results. 
* **Linux beefy VM on VMware** 6.0GB RSS forked in 77 milliseconds (12.8 milliseconds per GB). * **Linux running on physical machine (Unknown HW)** 6.1GB RSS forked in 80 milliseconds (13.1 milliseconds per GB) * **Linux running on physical machine (Xeon @ 2.27Ghz)** 6.9GB RSS forked into 62 milliseconds (9 milliseconds per GB). -* **Linux VM on 6sync (KVM)** 360 MB RSS forked in 8.2 milliseconds (23.3 millisecond per GB). +* **Linux VM on 6sync (KVM)** 360 MB RSS forked in 8.2 milliseconds (23.3 milliseconds per GB). * **Linux VM on EC2, old instance types (Xen)** 6.1GB RSS forked in 1460 milliseconds (239.3 milliseconds per GB). * **Linux VM on EC2, new instance types (Xen)** 1GB RSS forked in 10 milliseconds (10 milliseconds per GB). -* **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 millisecodns (424 milliseconds per GB). +* **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 milliseconds (424 milliseconds per GB). As you can see certanin VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. 
From 982e4ab58beaaf0e6be3f8ef3e9b5feb2494feb9 Mon Sep 17 00:00:00 2001 From: stephensearles Date: Fri, 14 Nov 2014 10:55:11 -0500 Subject: [PATCH 0096/2314] adding shipwire/redis to clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index e5e34dd99d..6f1d3d7d61 100644 --- a/clients.json +++ b/clients.json @@ -164,6 +164,15 @@ "authors": ["keimoon"], "active": true }, + + { + "name": "shipwire/redis", + "language": "Go", + "repository": "https://github.com/shipwire/redis", + "description": "A Redis client focused on streaming, with support for a print-like API, pipelining, Pub/Sub, and connection pooling.", + "authors": ["stephensearles"], + "active": true + }, { "name": "hedis", From 264e70c08c4840def4d9f58ed24a72c6e495c710 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 14 Nov 2014 11:29:20 -1000 Subject: [PATCH 0097/2314] Document newly integrated BitOps library --- commands/eval.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/commands/eval.md b/commands/eval.md index 9c37852bb8..763f6a57b0 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -512,6 +512,7 @@ The Redis Lua interpreter loads the following Lua libraries: * struct lib. * cjson lib. * cmsgpack lib. +* bitop lib * redis.sha1hex function. Every Redis instance is _guaranteed_ to have all the above libraries so you can @@ -586,6 +587,27 @@ Example: 3) "baz" ``` +### bitop + +The Lua Bit Operations Module adds bitwise operations on numbers. +It is available for scripting in Redis since version 2.8.18. 
+ +Example: + +``` +127.0.0.1:6379> eval 'return bit.tobit(1)' 0 +(integer) 1 +127.0.0.1:6379> eval 'return bit.bor(1,2,4,8,16,32,64,128)' 0 +(integer) 255 +127.0.0.1:6379> eval 'return bit.tohex(422342)' 0 +"000671c6" +``` + +It supports several other functions: +`bit.tobit`, `bit.tohex`, `bit.bnot`, `bit.band`, `bit.bor`, `bit.bxor`, +`bit.lshift`, `bit.rshift`, `bit.arshift`, `bit.rol`, `bit.ror`, `bit.bswap`. +All available functions are documented in the [Lua BitOp documentation](http://bitop.luajit.org/api.html) + ### redis.sha1hex Perform the SHA1 of the input string. From 74f8ad31c5a9d0bcd6fec366337b7485ee4e35a7 Mon Sep 17 00:00:00 2001 From: Angelo Scotto Date: Sat, 15 Nov 2014 21:27:12 +0100 Subject: [PATCH 0098/2314] Added C# implementation --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index bb7053103a..a3e60db54d 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -29,6 +29,7 @@ already available, that can be used as a reference. * [Redisson](https://github.com/mrniko/redisson) (Java implementation). * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). * [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (Cpp implementation). +* [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). 
Safety and Liveness guarantees --- From bd12469f266c1347f9784c84a46b4bb66dedcc77 Mon Sep 17 00:00:00 2001 From: Ari Aosved Date: Sat, 15 Nov 2014 15:53:21 -0800 Subject: [PATCH 0099/2314] Fix reference to the action LPUSH performs wrt RPOPLPUSH --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 98886e0708..fc730216bd 100644 --- a/commands.json +++ b/commands.json @@ -1421,7 +1421,7 @@ "group": "list" }, "RPOPLPUSH": { - "summary": "Remove the last element in a list, append it to another list and return it", + "summary": "Remove the last element in a list, prepend it to another list and return it", "complexity": "O(1)", "arguments": [ { From 5b6f6640413d7fc32b29163db8a5b44bcb3307e7 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 5 Dec 2014 10:19:05 +0100 Subject: [PATCH 0100/2314] Latency: specify that intrinsic latency must be checked on the server. --- topics/latency.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/topics/latency.md b/topics/latency.md index 9bb456a523..24213084f2 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -20,7 +20,7 @@ return here to read the full documentation. 1. Make sure you are not running slow commands that are blocking the server. Use the Redis [Slow Log feature](/commands/slowlog) to check this. 2. For EC2 users, make sure you use HVM based modern EC2 instances, like m3.medium. Otherwise fork() is too slow. 3. Transparent huge pages must be disabled from your kernel. Use `echo never > /sys/kernel/mm/transparent_hugepage/enabled` to disable them, and restart your Redis process. -4. If you are using a virtual machine, it is possible that you have an intrinsic latency that has nothing to do with Redis. Check the minimum latency you can expect from your runtime environment using `./redis-cli --intrinsic-latency 100`. +4. 
If you are using a virtual machine, it is possible that you have an intrinsic latency that has nothing to do with Redis. Check the minimum latency you can expect from your runtime environment using `./redis-cli --intrinsic-latency 100`. Note: you need to run this command in *the server* not in the client. 5. Enable and use the [Latency monitor](/topics/latency-monitor) feature of Redis in order to get a human readable description of the latency events and causes in your Redis instance. In general, use the following table for durability VS latency/performance tradeoffs, ordered from stronger safety to better latency. @@ -88,9 +88,12 @@ intensive and will likely saturate a single core in your system. Max latency so far: 83 microseconds. Max latency so far: 115 microseconds. -The intrinsic latency of this system is just 0.115 milliseconds (or 115 -microseconds), which is a good news, however keep in mind that the intrinsic -latency may change over time depending on the load of the system. +Note: redis-cli in this special case needs to **run in the server** where you run or plan to run Redis, not in the client. In this special mode redis-cli does no connect to a Redis server at all: it will just try to measure the largest time the kernel does not provide CPU time to run to the redis-cli process itself. + +In the above example, the intrinsic latency of the system is just 0.115 +milliseconds (or 115 microseconds), which is a good news, however keep in mind +that the intrinsic latency may change over time depending on the load of the +system. Virtualized environments will not show so good numbers, especially with high load or if there are noisy neighbors. The following is a run on a Linode 4096 From 39113d7343e79ff5aff8f28f863f51ad33aff50f Mon Sep 17 00:00:00 2001 From: plz Date: Sun, 7 Dec 2014 23:55:10 +0100 Subject: [PATCH 0101/2314] Update sentinel.md Fixing typo. 
--- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 82873ed9b9..fcd658249c 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -152,7 +152,7 @@ that need to agree about the unreachability or error condition of the master in order to trigger a failover. However, after the failover is triggered, in order for the failover to actually be -performed, **at least a majority of Sentinels must authorized the Sentinel to +performed, **at least a majority of Sentinels must authorize the Sentinel to failover**. Let's try to make things a bit more clear: From 1913de46cd04c915d801b3ac113b87c04f66f2b9 Mon Sep 17 00:00:00 2001 From: Larry Ng Date: Tue, 9 Dec 2014 18:49:58 -0500 Subject: [PATCH 0102/2314] Fix number of keys sampled per expire loop (issue #461) --- commands/expire.md | 2 +- topics/latency.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/commands/expire.md b/commands/expire.md index dd0142da03..2f04b02ede 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -135,7 +135,7 @@ All the keys that are already expired are deleted from the keyspace. Specifically this is what Redis does 10 times per second: -1. Test 100 random keys from the set of keys with an associated expire. +1. Test 20 random keys from the set of keys with an associated expire. 2. Delete all the keys found expired. 3. If more than 25 keys were expired, start again from step 1. diff --git a/topics/latency.md b/topics/latency.md index 24213084f2..a334ecdfea 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -549,10 +549,10 @@ Redis evict expired keys in two ways: The active expiring is designed to be adaptive. An expire cycle is started every 100 milliseconds (10 times per second), and will do the following: -+ Sample `REDIS_EXPIRELOOKUPS_PER_CRON` keys, evicting all the keys already expired. 
++ Sample `ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP` keys, evicting all the keys already expired. + If the more than 25% of the keys were found expired, repeat. -Given that `REDIS_EXPIRELOOKUPS_PER_CRON` is set to 10 by default, and the process is performed ten times per second, usually just 100 keys per second are actively expired. This is enough to clean the DB fast enough even when already expired keys are not accessed for a long time, so that the *lazy* algorithm does not help. At the same time expiring just 100 keys per second has no effects in the latency a Redis instance. +Given that `ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP` is set to 20 by default, and the process is performed ten times per second, usually just 200 keys per second are actively expired. This is enough to clean the DB fast enough even when already expired keys are not accessed for a long time, so that the *lazy* algorithm does not help. At the same time expiring just 200 keys per second has no effects in the latency a Redis instance. However the algorithm is adaptive and will loop if it founds more than 25% of keys already expired in the set of sampled keys. But given that we run the algorithm ten times per second, this means that the unlucky event of more than 25% of the keys in our random sample are expiring at least *in the same second*. From 27c9c0a6d2f98095dc0b701b17913219f3e023c0 Mon Sep 17 00:00:00 2001 From: Titouan Galopin Date: Wed, 23 Jan 2013 14:09:48 +0100 Subject: [PATCH 0103/2314] Update clients.json Add Yampee Redis client for PHP. --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 6f1d3d7d61..cdfdb476b0 100644 --- a/clients.json +++ b/clients.json @@ -418,6 +418,14 @@ "active": true }, + { + "name": "Yampee Redis", + "language": "PHP", + "repository": "https://github.com/yampee/Redis", + "description": "A full-featured Redis client for PHP 5.2. 
Easy to use and to extend.", + "authors": ["tgalopin"] + }, + { "name": "redis-py", "language": "Python", From a2a09137c275cc3dc7e7867daf3ea0d420c21ac9 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 11 Dec 2014 08:39:44 +0000 Subject: [PATCH 0104/2314] Remove extra comma --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 31d725a5ee..886a1febbb 100644 --- a/clients.json +++ b/clients.json @@ -263,7 +263,7 @@ "language": "Java", "repository": "https://github.com/vert-x/mod-redis", "description": "Official asynchronous redis.io bus module for Vert.x", - "authors": ["pmlopes"], + "authors": ["pmlopes"] }, { From 7d7b4a05e8c9e2387a0c643d2a42455ed3420c55 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 11 Dec 2014 08:40:53 +0000 Subject: [PATCH 0105/2314] Resolve conflicts --- clients.json | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 886a1febbb..6325cba82e 100644 --- a/clients.json +++ b/clients.json @@ -892,6 +892,13 @@ "description": "A C++ asynchronous client based on boost::asio", "authors": [ "nekipelov" ], "active": true - } - + }, + + { + "name": "redis-octave", + "language": "Matlab", + "repository": "https://github.com/markuman/redis-octave", + "description": "A Redis client in pure Octave ", + "authors": ["markuman"] + } ] From b06c7bc04920ecc0e0715bd820f02de079667a02 Mon Sep 17 00:00:00 2001 From: Changjian Gao Date: Fri, 12 Dec 2014 13:02:25 +0800 Subject: [PATCH 0106/2314] Fixes typo in sentinel.md --- topics/sentinel.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 82873ed9b9..f48fe88a95 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -393,13 +393,13 @@ The slave selection process evaluates the following informations about slaves: 4. Run ID. 
A slave that is found to be disconnected from the master for more than ten -times the configured masster timeout (down-after-milliseconds option), plus +times the configured master timeout (down-after-milliseconds option), plus the time the master is also not available from the point of view of the Sentinel doing the failover, is considered to be not suitable for the failover and is skipped. In more rigorous terms, a slave whose the `INFO` output suggests to be -disconnected form the master for more than: +disconnected from the master for more than: (down-after-milliseconds * 10) + milliseconds_since_master_is_in_SDOWN_state From cfdb7794e277bb50e31fe506f17b5fff0019210b Mon Sep 17 00:00:00 2001 From: Alon Diamant Date: Sun, 14 Dec 2014 02:20:19 +0200 Subject: [PATCH 0107/2314] Updated SPOP documentation, following redis issue #1793: SPOP optional count argument --- commands.json | 7 ++++++- commands/spop.md | 18 +++++++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/commands.json b/commands.json index 98886e0708..5888c79ec0 100644 --- a/commands.json +++ b/commands.json @@ -1869,12 +1869,17 @@ "group": "generic" }, "SPOP": { - "summary": "Remove and return a random member from a set", + "summary": "Remove and return one or multiple random members from a set", "complexity": "O(1)", "arguments": [ { "name": "key", "type": "key" + }, + { + "name": "count", + "type": "integer", + "optional": true } ], "since": "1.0.0", diff --git a/commands/spop.md b/commands/spop.md index 466a9ecf33..cc1b11a490 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -1,7 +1,6 @@ -Removes and returns a random element from the set value stored at `key`. +Removes and returns one or more random elements from the set value store at `key`. -This operation is similar to `SRANDMEMBER`, that returns a random element from a -set but does not remove it. 
+This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. @return @@ -15,4 +14,17 @@ SADD myset "two" SADD myset "three" SPOP myset SMEMBERS myset +SADD myset "four" +SADD myset "five" +SPOP myset 3 +SMEMBERS myset ``` + + +## Specification of the behavior when count is passed + +If count is bigger than the number of elements inside the Set, the command will only return the whole set without additional elements. + +## Distribution of returned elements + +Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms. \ No newline at end of file From 454830f13c4da3f6ef88b7c06f90803f28a77ba6 Mon Sep 17 00:00:00 2001 From: Dylan Thacker-Smith Date: Sun, 14 Dec 2014 21:19:18 -0500 Subject: [PATCH 0108/2314] Fix spelling and grammar in sentinel documentation. --- topics/sentinel-clients.md | 10 +++++----- topics/sentinel-spec.md | 4 ++-- topics/sentinel.md | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/topics/sentinel-clients.md b/topics/sentinel-clients.md index 8c5a2339b3..1f56af7d69 100644 --- a/topics/sentinel-clients.md +++ b/topics/sentinel-clients.md @@ -21,14 +21,14 @@ Redis service discovery via Sentinel === Redis Sentinel identify every master with a name like "stats" or "cache". -Every name actually identifies a *group of intances*, composed of a master +Every name actually identifies a *group of instances*, composed of a master and a variable number of slaves. The address of the Redis master that is used for a specific purpose inside a network may change after events like an automatic failover, a manually triggered failover (for instance in order to upgrade a Redis instance), and other reasons. 
-Normally Redis clients have some kind of hard-coded configuraiton that specifies the address of a Redis master instance within a network as IP address and port number. However if the master address changes, manual intervention in every client is needed. +Normally Redis clients have some kind of hard-coded configuration that specifies the address of a Redis master instance within a network as IP address and port number. However if the master address changes, manual intervention in every client is needed. -A Redis client supporting Sentinel can automatically discover the address of a Redis master from the master name using Redis Sentinel. So instead of an hard coded IP address and port, a client supporting Sentinel should optionally be able to take as input: +A Redis client supporting Sentinel can automatically discover the address of a Redis master from the master name using Redis Sentinel. So instead of a hard coded IP address and port, a client supporting Sentinel should optionally be able to take as input: * A list of ip:port pairs pointing to known Sentinel instances. * The name of the service, like "cache" or "timelines". @@ -94,7 +94,7 @@ address again. If the client will contact a Sentinel with yet not updated information, the verification of the Redis instance role via the `ROLE` command will fail, allowing the client to detect that the contacted Sentinel provided stale information, and will try again. -Note: it is possible that a stale master returns online at the same time a client contacts a stale Sentinel instance, so the client may connect with a stale master, and yet the ROLE output will match. However when the master is back again Sentinel will try to demote it to slave, triggering a new disconnection. The same reasoning applies to connecting to stale slaves that will get reconfigured to replicate with a differnt master. 
+Note: it is possible that a stale master returns online at the same time a client contacts a stale Sentinel instance, so the client may connect with a stale master, and yet the ROLE output will match. However when the master is back again Sentinel will try to demote it to slave, triggering a new disconnection. The same reasoning applies to connecting to stale slaves that will get reconfigured to replicate with a different master. Connecting to slaves === @@ -144,7 +144,7 @@ Sentinel instances using Pub/Sub in order to subscribe to changes in the Redis instances configurations. This mechanism can be used in order to speedup the reconfiguration of clients, -that is, clients may listent to Pub/Sub in order to know when a configuration +that is, clients may listen to Pub/Sub in order to know when a configuration change happened in order to run the three steps protocol explained in this document in order to resolve the new Redis master (or slave) address. diff --git a/topics/sentinel-spec.md b/topics/sentinel-spec.md index ee5bcd13f5..caeb7c03ab 100644 --- a/topics/sentinel-spec.md +++ b/topics/sentinel-spec.md @@ -19,7 +19,7 @@ is intended to be used by people that don't need Redis Cluster, but simply a way to perform automatic fail over when a master instance is not functioning correctly. -The plan is to provide an usable beta implementaiton of Redis Sentinel in a +The plan is to provide a usable beta implementation of Redis Sentinel in a short time, preferably in mid July 2012. In short this is what Redis Sentinel will be able to do: @@ -280,7 +280,7 @@ or other error conditions. In such a case the protection against race conditions (multiple Sentinels starting to perform the fail over at the same time) is given by the random delay used to start the fail over, and the continuous monitor of the slave instances to detect if another Sentinel -(or an human) started the failover process. +(or a human) started the failover process. 
Moreover the slave to promote is selected using a deterministic process to minimize the chance that two different Sentinels with full vision of the diff --git a/topics/sentinel.md b/topics/sentinel.md index 82873ed9b9..11a0710daf 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -177,7 +177,7 @@ Configuration epochs Sentinels require to get authorizations from a majority in order to start a failover for a few important reasons: -When a Sentinel is authorized, it gets an unique **configuration epoch** for the master it is failing over. This is a number that will be used to version the new configuration after the failover is completed. Because a majority agreed that a given version was assigned to a given Sentinel, no other Sentinel will be able to use it. This means that every configuration of every failover is versioned with an unique version. We'll see why this is so important. +When a Sentinel is authorized, it gets a unique **configuration epoch** for the master it is failing over. This is a number that will be used to version the new configuration after the failover is completed. Because a majority agreed that a given version was assigned to a given Sentinel, no other Sentinel will be able to use it. This means that every configuration of every failover is versioned with a unique version. We'll see why this is so important. Moreover Sentinels have a rule: if a Sentinel voted another Sentinel for the failover of a given master, it will wait some time to try to failover the same master again. This delay is the `failover-timeout` you can configure in `sentinel.conf`. This means that Sentinels will not try to failover the same master at the same time, the first to ask to be authorized will try, if it fails another will try after some time, and so forth. @@ -410,7 +410,7 @@ and sorts it based on the above criteria, in the following order. 1. The slaves are sorted by `slave-priority` as confiugred in the `redis.conf` file of the Redis instance. 
A lower priority will be preferred. 2. If the priority is the same, the replication offset processed by the slave is checked, and the slave that received more data from the master is selected. -3. If multiple slaves have the same priority and processed the same data from the master, a further check is performed, selecting the slave with the lexicographically smaller run ID. Having a lower run ID is not a real advantage for a slave, but is useful in order to make the process of slave selection more determiistic, instead of resorting to select a random slave. +3. If multiple slaves have the same priority and processed the same data from the master, a further check is performed, selecting the slave with the lexicographically smaller run ID. Having a lower run ID is not a real advantage for a slave, but is useful in order to make the process of slave selection more deterministic, instead of resorting to select a random slave. Redis masters (that may be turned into slaves after a failover), and slaves, all must be configured with a `slave-priority` if there are machines to be strongly From 123ae6c2e08ae8838dd131d8e278fe0c20a4b1f6 Mon Sep 17 00:00:00 2001 From: Dylan Thacker-Smith Date: Mon, 15 Dec 2014 00:39:11 -0500 Subject: [PATCH 0109/2314] Fix spelling and grammar in cluster documentation. --- topics/cluster-spec.md | 34 +++++++++++++++++----------------- topics/cluster-tutorial.md | 16 ++++++++-------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 75858df490..e8fba63c81 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -98,7 +98,7 @@ Eventually clients obtain an up to date representation of the cluster and which Because of the use of asynchronous replication, nodes does not wait for other nodes acknowledgment of writes (optional synchronous replication is a work in progress and will be likely added in future releases). 
-Also, because muliple keys commands are only limited to *near* keys, data is never moved between nodes if not in case of resharding. +Also, because multiple keys commands are only limited to *near* keys, data is never moved between nodes if not in case of resharding. So normal operations are handled exactly as in the case of a single Redis instance. This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design allows to scale linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single stand alone Redis node case. @@ -219,7 +219,7 @@ C example code: Cluster nodes attributes --- -Every node has an unique name in the cluster. The node name is the +Every node has a unique name in the cluster. The node name is the hex representation of a 160 bit random number, obtained the first time a node is started (usually using /dev/urandom). The node will save its ID in the node configuration file, and will use the @@ -284,7 +284,7 @@ Redis cluster is a full mesh where every node is connected with every other node In a cluster of N nodes, every node has N-1 outgoing TCP connections, and N-1 incoming connections. These TCP connections are kept alive all the time and are not created on demand. -When a node expects an pong reply in response to a ping in the cluster bus, before to wait for enough time to mark the node as unreachable, it will try to +When a node expects a pong reply in response to a ping in the cluster bus, before to wait for enough time to mark the node as unreachable, it will try to refresh the connection with the node by reconnecting from scratch. 
While Redis Cluster nodes form a full mesh, nodes use a gossip protocol and @@ -361,13 +361,13 @@ Cluster live reconfiguration Redis cluster supports the ability to add and remove nodes while the cluster is running. Actually adding or removing a node is abstracted into the same -operation, that is, moving an hash slot from a node to another. +operation, that is, moving a hash slot from a node to another. * To add a new node to the cluster an empty node is added to the cluster and some hash slot is moved from existing nodes to the new node. * To remove a node from the cluster the hash slots assigned to that node are moved to other existing nodes. So the core of the implementation is the ability to move slots around. -Actually from a practical point of view an hash slot is just a set of keys, so +Actually from a practical point of view a hash slot is just a set of keys, so what Redis cluster really does during *resharding* is to move keys from an instance to another instance. @@ -595,7 +595,7 @@ by the slave's master node. This may happen because: When this happens the client should update its hashslot map as explained in the previous sections. -The *readonly* state of the connection can be undoed using the `READWRITE` command. +The *readonly* state of the connection can be undone using the `READWRITE` command. Fault Tolerance === @@ -618,7 +618,7 @@ There are ways to use the gossip information already exchanged by Redis Cluster Ping and Pong packets content --- -Ping and Pong packets contain an header that is common to all the kind of packets (for instance packets to request a vote), and a special Gossip Section that is specific of Ping and Pong packets. +Ping and Pong packets contain a header that is common to all the kind of packets (for instance packets to request a vote), and a special Gossip Section that is specific of Ping and Pong packets. 
The common header has the following information: @@ -783,7 +783,7 @@ Masters receive requests for votes in form of `FAILOVER_AUTH_REQUEST` requests f For a vote to be granted the following conditions need to be met: -* 1) A master only votes a single time for a given epoch, and refuses to vote for older epochs: every master has a lastVoteEpoch field and will refuse to vote again as long as the `currentEpoch` in the auth request packet is not greater than the lastVoteEpoch. When a master replies positively to an vote request, the lastVoteEpoch is updated accordingly. +* 1) A master only votes a single time for a given epoch, and refuses to vote for older epochs: every master has a lastVoteEpoch field and will refuse to vote again as long as the `currentEpoch` in the auth request packet is not greater than the lastVoteEpoch. When a master replies positively to a vote request, the lastVoteEpoch is updated accordingly. * 2) A master votes for a slave only if the slave's master is flagged as `FAIL`. * 3) Auth requests with a `currentEpoch` that is less than the master `currentEpoch` are ignored. Because of this the Master reply will always have the same `currentEpoch` as the auth request. If the same slave asks again to be voted, incrementing the `currentEpoch`, it is guaranteed that an old delayed reply from the master can not be accepted for the new vote. @@ -823,21 +823,21 @@ Rules for server slots information propagation An important part of Redis Cluster is the mechanism used to propagate the information about which cluster node is serving a given set of hash slots. This is vital to both the startup of a fresh cluster and the ability to upgrade the configuration after a slave was promoted to serve the slots of its failing master. -Ping and Pong packets that instances continuously exchange contain an header that is used by the sender in order to advertise the hash slots it claims to be responsible for. 
This is the main mechanism used in order to propagate change, with the exception of a manual reconfiguration operated by the cluster administrator (for example a manual resharding via redis-trib in order to move hash slots among masters). +Ping and Pong packets that instances continuously exchange contain a header that is used by the sender in order to advertise the hash slots it claims to be responsible for. This is the main mechanism used in order to propagate change, with the exception of a manual reconfiguration operated by the cluster administrator (for example a manual resharding via redis-trib in order to move hash slots among masters). When a new Redis Cluster node is created, its local slot table, that maps a given hash slot with a given node ID, is initialized so that every hash slot is assigned to nil, that is, the hash slot is unassigned. The first rule followed by a node in order to update its hash slot table is the following: -**Rule 1: If an hash slot is unassigned, and a known node claims it, I'll modify my hash slot table to associate the hash slot to this node.** +**Rule 1: If a hash slot is unassigned, and a known node claims it, I'll modify my hash slot table to associate the hash slot to this node.** Because of this rule, when a new cluster is created, it is only needed to manually assign (using the `CLUSTER` command, usually via the redis-trib command line tool) the slots served by each master node to the node itself, and the information will rapidly propagate across the cluster. However this rule is not enough when a configuration update happens because of a slave gets promoted to master after a master failure. The new master instance will advertise the slots previously served by the old slave, but those slots are not unassigned from the point of view of the other nodes, that will not upgrade the configuration if they just follow the first rule. 
-For this reason there is a second rule that is used in order to rebind an hash slot already assigned to a previous node to a new node claiming it. The rule is the following: +For this reason there is a second rule that is used in order to rebind a hash slot already assigned to a previous node to a new node claiming it. The rule is the following: -**Rule 2: If an hash slot is already assigned, and a known node is advertising it using a `configEpoch` that is greater than the `configEpoch` advertised by the current owner of the slot, I'll rebind the hash slot to the new node.** +**Rule 2: If a hash slot is already assigned, and a known node is advertising it using a `configEpoch` that is greater than the `configEpoch` advertised by the current owner of the slot, I'll rebind the hash slot to the new node.** Because of the second rule eventually all the nodes in the cluster will agree that the owner of a slot is the one with the greatest `configEpoch` among the nodes advertising it. @@ -934,7 +934,7 @@ So for example if there are 10 masters with 1 slave each, and 2 masters with having 5 slaves, the one with the lowest node ID. Given that no agreement is used, it is possible that when the cluster configuration is not stable, a race condition occurs where multiple slaves think to be the non-failing -slave with the lower node ID (but it is an hard to trigger condition in +slave with the lower node ID (but it is a hard to trigger condition in practice). If this happens, the result is multiple slaves migrating to the same master, which is harmless. If the race happens in a way that will left the ceding master without slaves, as soon as the cluster is stable again @@ -945,7 +945,7 @@ Eventually every master will be backed by at least one slave, however normally the behavior is that a single slave migrates from a master with multiple slaves to an orphaned master. 
-The algorithm is controlled by an user-configurable parameter called +The algorithm is controlled by a user-configurable parameter called `cluster-migration-barrier`, that is the number of good slaves a master will be left with for a slave to migrate. So for example if this parameter is set to 2, a slave will try to migrate only if its master remains with @@ -957,14 +957,14 @@ configEpoch conflicts resolution algorithm When new `configEpoch` values are created via slave promotions during failovers, they are guaranteed to be unique. -However during manual reshardings, when an hash slot is migrated from +However during manual reshardings, when a hash slot is migrated from a node A to a node B, the resharding program will force B to upgrade its configuration to an epoch which is the greatest found in the cluster, plus 1 (unless the node is already the one with the greatest configuration epoch), without to require for an agreement from other nodes. This is needed so that the new slot configuration will win over the old one. -This process happens when the system administator performs a manual +This process happens when the system administrator performs a manual resharding, however it is possible that when the slot is closed after a resharding and the node assigns itself a new configuration epoch, at the same time a failure happens, just before the new `configEpoch` is @@ -996,7 +996,7 @@ the same `configEpoch`. * THEN it increments its `currentEpoch` by 1, and uses it as the new `configEpoch`. If there are any set of nodes with the same `configEpoch`, all the nodes but the one with the greatest Node ID will move forward, guaranteeing that every node -will pick an unique configEpoch regardless of what happened. +will pick a unique configEpoch regardless of what happened. This mechanism also guarantees that after a fresh cluster is created all nodes start with a different `configEpoch`. 
diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 933bed35a9..03338669b5 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -9,7 +9,7 @@ the [Redis Cluster specification](/topics/cluster-spec) but just describing how the system behaves from the point of view of the user. Note that if you plan to run a serious Redis Cluster deployment, the -more formal specification is an highly suggested reading. +more formal specification is a highly suggested reading. **Redis cluster is currently alpha quality code**, please get in touch in the Redis mailing list or open an issue in the Redis Github repository if you @@ -233,7 +233,7 @@ existed, every node assigns itself a new ID. [82462] 26 Nov 11:56:55.329 * No cluster configuration found, I'm 97a3a64667477371c4479320d683e4c8db5858b1 This ID will be used forever by this specific instance in order for the instance -to have an unique name in the context of the cluster. Every node +to have a unique name in the context of the cluster. Every node remembers every other node using this IDs, and not by IP or port. IP addresses and ports may change, but the unique node identifier will never change for all the life of the node. We call this identifier simply **Node ID**. @@ -745,7 +745,7 @@ having as a target the empty node. Adding a new node as a replica --- -Adding a new Replica can be performed in two ways. The obivous one is to +Adding a new Replica can be performed in two ways. The obvious one is to use redis-trib again, but with the --slave option, like this: ./redis-trib.rb add-node --slave 127.0.0.1:7006 127.0.0.1:7000 @@ -755,7 +755,7 @@ a new master, so we are not specifying to which master we want to add the replica. In this case what happens is that redis-trib will add the new node as replica of a random master among the masters with less replicas. 
-However you can specifiy exactly what master you want to target with your +However you can specify exactly what master you want to target with your new replica with the following command line: ./redis-trib.rb add-node --slave --master-id 3c3a0c74aae0b56170ccb03a76b60cfe7dc1912e 127.0.0.1:7006 127.0.0.1:7000 @@ -861,7 +861,7 @@ Upgrading nodes in a Redis Cluster Upgrading slave nodes is easy since you just need to stop the node and restart it with an updated version of Redis. If there are clients scaling reads using slave nodes, they should be able to reconnect to a different slave if a given -one is not avaialble. +one is not available. Upgrading masters is a bit more complex, and the suggested procedure is: @@ -885,7 +885,7 @@ In both cases it is possible to migrate to Redis Cluster easily, however what is the most important detail is if multiple-keys operations are used by the application, and how. There are three different cases: -1. Multiple keys operations, or transactions, or Lua scripts involving muliple keys, are not used. Keys are accessed independently (even if accessed via transactions or Lua scripts grouping multiple commands, about the same key, together). +1. Multiple keys operations, or transactions, or Lua scripts involving multiple keys, are not used. Keys are accessed independently (even if accessed via transactions or Lua scripts grouping multiple commands, about the same key, together). 2. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used but only with keys having the same **hash tag**, which means that the keys used together all have a `{...}` sub-string that happens to be identical. For example the following multiple keys operation is defined in the context of the same hash tag: `SUNION {user:1000}.foo {user:1000}.bar`. 3. Multiple keys operations, transactions, or Lua scripts involving multiple keys are used with key names not having an explicit, or the same, hash tag. 
@@ -900,11 +900,11 @@ Assuming you have your preexisting data set split into N masters, where N=1 if you have no preexisting sharding, the following steps are needed in order to migrate your data set to Redis Cluster: -1. Stop your clients. No automatic live-migration to Redis Cluster is currently possible. You may be able to do it orchestrating a live migration in the context of your application / enviroment. +1. Stop your clients. No automatic live-migration to Redis Cluster is currently possible. You may be able to do it orchestrating a live migration in the context of your application / environment. 2. Generate an append only file for all of your N masters using the BGREWRITEAOF command, and waiting for the AOF file to be completely generated. 3. Save your AOF files from aof-1 to aof-N somewhere. At this point you can stop your old instances if you wish (this is useful since in non-virtualized deployments you often need to reuse the same computers). 4. Create a Redis Cluster composed of N masters and zero slaves. You'll add slaves later. Make sure all your nodes are using the append only file for persistence. -5. Stop all the cluster nodes, substitute their append only file with your pre-eisitng append only files, aof-1 for the first node, aof-2 for the secod node, up to aof-N. +5. Stop all the cluster nodes, substitute their append only file with your pre-existing append only files, aof-1 for the first node, aof-2 for the second node, up to aof-N. 6. Restart your Redis Cluster nodes with the new AOF files. They'll complain that there are keys that should not be there according to their configuration. 7. Use `redis-trib fix` command in order to fix the cluster so that keys will be migrated according to the hash slots each node is authoritative or not. 8. Use `redis-trib check` at the end to make sure your cluster is ok. 
From 4b09cf8b2c2604a709847602b579bf50e8e3f92b Mon Sep 17 00:00:00 2001 From: Andrey Bulygin Date: Thu, 23 May 2013 12:27:44 +0300 Subject: [PATCH 0110/2314] Added redisboost to C# clients --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 5407127751..88590bd704 100644 --- a/clients.json +++ b/clients.json @@ -908,5 +908,14 @@ "repository": "https://github.com/markuman/redis-octave", "description": "A Redis client in pure Octave ", "authors": ["markuman"] + }, + + { + "name": "redisboost", + "language": "C#", + "url": "http://andrew-bn.github.io/RedisBoost/", + "repository": "https://github.com/andrew-bn/RedisBoost", + "description": "Thread-safe async Redis client. Offers high performance and simple api", + "authors": ["bn_andrew"] } ] From 84b602eb463d785a8186d76379ff1ebf192a1a09 Mon Sep 17 00:00:00 2001 From: Daniel Norton Date: Wed, 17 Dec 2014 14:55:05 -0600 Subject: [PATCH 0111/2314] Spelling/typo in faq.md s/efficinet/efficient/ --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index 4569db7258..d2ea76c440 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -40,7 +40,7 @@ to split your data set into multiple Redis instances, please read the Yes, a common design pattern involves taking very write-heavy small data in Redis (and data you need the Redis data structures to model your problem -in an efficinet way), and big *blobs* of data into an SQL or eventually +in an efficient way), and big *blobs* of data into an SQL or eventually consistent on-disk database. ## Is there something I can do to lower the Redis memory usage? 
From fda74b6688dccf7b7f77698ab7046a1bb124192f Mon Sep 17 00:00:00 2001 From: Josiah Carlson Date: Mon, 22 Dec 2014 15:53:31 -0800 Subject: [PATCH 0112/2314] Added a couple tools, fixed some urls --- tools.json | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tools.json b/tools.json index 97810e602d..425ad46a82 100644 --- a/tools.json +++ b/tools.json @@ -2,7 +2,7 @@ { "name": "Resque", "language": "Ruby", - "repository": "https://github.com/defunkt/resque", + "repository": "https://github.com/resque/resque", "description": "Resque is a Redis-backed Ruby library for creating background jobs, placing them on multiple queues, and processing them later.", "authors": ["defunkt"] }, @@ -37,14 +37,14 @@ { "name": "Kombu", "language": "Python", - "repository": "https://github.com/ask/kombu", + "repository": "https://github.com/celery/kombu", "description": "Python AMQP Framework with redis suppport", "authors": [] }, { "name": "Sider", "language": "Python", - "repository": "https://bitbucket.org/dahlia/sider", + "repository": "https://github.com/dahlia/sider", "description": "Python persistent object library based on Redis.", "authors": ["hongminhee"] }, @@ -262,7 +262,7 @@ { "name": "Redis Qi4j EntityStore", "language": "Java", - "url": "http://qi4j.org/extension-es-redis.html", + "url": "http://qi4j.org/latest/extension-es-redis.html", "repository": "http://github.com/qi4j/qi4j-sdk", "description": "Qi4j EntityStore backed by Redis", "authors": ["eskatos"] @@ -312,5 +312,21 @@ "repository": "https://github.com/poying/redis-mount", "description": "redis-mount lets you use Redis as a filesystem.", "authors": ["poying"] + }, + { + "name": "RPQueue", + "language": "Python", + "url": "https://pypi.python.org/pypi/rpqueue", + "repository": "https://github.com/josiahcrlson/rpqueue", + "description": "RPQueue offers a 
prioritized, periodic, and scheduled task system for Python using Redis", + "authors": ["josiahcarlson"] + }, + { + "name": "rom", + "language": "Python", + "url": "https://pypi.python.org/pypi/rom", + "repository": "https://github.com/josiahcarlson/rom", + "description": "Redis object mapper for Python using declarative models, with search over numeric, full text, prefix, and suffix indexes", + "authors": ["josiahcarlson"] } ] From 6c387f115b5d8021c8ad8caf7f4ae89673597220 Mon Sep 17 00:00:00 2001 From: John Claus Date: Wed, 31 Dec 2014 10:02:43 -0700 Subject: [PATCH 0113/2314] Updated the Sentinel doc to fix minor grammatical issues. --- topics/sentinel.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 11a0710daf..5aef796d8a 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -12,10 +12,10 @@ It performs the following four tasks: Distributed nature of Sentinel --- -Redis Sentinel is a distributed system, this means that usually you want to run -multiple Sentinel processes across your infrastructure, and this processes +Redis Sentinel is a distributed system. This means that usually you want to run +multiple Sentinel processes across your infrastructure. These processes will use gossip protocols in order to understand if a master is down and -agreement protocols in order to get authorized to perform the failover and assign +agreement protocols in order to become authorized to perform the failover and assign a new version to the new configuration. 
Distributed systems have given *safety* and *liveness* properties, in order to From a196acb77cede31194e57203fe7323bf34e79c34 Mon Sep 17 00:00:00 2001 From: Santi Saez Date: Fri, 2 Jan 2015 13:11:52 +0100 Subject: [PATCH 0114/2314] Fix some typos in topics/latency.md --- topics/latency.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/topics/latency.md b/topics/latency.md index 24213084f2..8406cad955 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -47,13 +47,13 @@ Using the internal Redis latency monitoring subsystem --- Since Redis 2.8.13, Redis provides latency monitoring capabilities that -are able to sample differnet execution paths to understand where the -server is blocking. This makes debugging of the problems illustarated in +are able to sample different execution paths to understand where the +server is blocking. This makes debugging of the problems illustrated in this documentation much simpler, so we suggest to enable latency monitoring ASAP. Please refer to the [Latency monitor documentation](/topics/latency-monitor). While the latency monitoring sampling and reporting capabilities will make -simpler to understand the soruce of latency in your Redis system, it is still +simpler to understand the source of latency in your Redis system, it is still advised that you read this documentation extensively to better understand the topic of Redis and latency spikes. @@ -245,7 +245,7 @@ using m3.medium (or better) instances will provide good results. * **Linux VM on 6sync (KVM)** 360 MB RSS forked in 8.2 milliseconds (23.3 millisecond per GB). * **Linux VM on EC2, old instance types (Xen)** 6.1GB RSS forked in 1460 milliseconds (239.3 milliseconds per GB). * **Linux VM on EC2, new instance types (Xen)** 1GB RSS forked in 10 milliseconds (10 milliseconds per GB). -* **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 millisecodns (424 milliseconds per GB). 
+* **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 milliseconds (424 milliseconds per GB). As you can see certanin VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. @@ -253,10 +253,10 @@ Latency induced by transparent huge pages ----------------------------------------- Unfortunately when a Linux kernel has transparent huge pages enabled, Redis -incurs to a big latency penality after the `fork` call is used in order to -persist on disk. Huge pages are the cause of the follwing issue: +incurs to a big latency penalty after the `fork` call is used in order to +persist on disk. Huge pages are the cause of the following issue: -1. Fork is called, two processes with shared huge pages are crated. +1. Fork is called, two processes with shared huge pages are created. 2. In a busy instance, a few event loops runs will cause commands to target a few thousand of pages, causing the copy on write of almost the whole process memory. 3. This will result in big latency and big memory usage. @@ -436,7 +436,7 @@ Redis instance you can further verify it using the **vmstat** command: 0 0 3980 697048 147180 1406640 0 0 0 0 18613 15987 6 6 88 0 2 0 3980 696924 147180 1406656 0 0 0 0 18744 16299 6 5 88 0 0 0 3980 697048 147180 1406688 0 0 0 4 18520 15974 6 6 88 0 -^C + ^C The interesting part of the output for our needs are the two columns **si** and **so**, that counts the amount of memory swapped from/to the swap file. If @@ -567,7 +567,7 @@ Redis software watchdog Redis 2.6 introduces the *Redis Software Watchdog* that is a debugging tool designed to track those latency problems that for one reason or the other -esacped an analysis using normal tools. +escaped an analysis using normal tools. The software watchdog is an experimental feature. 
While it is designed to be used in production environments care should be taken to backup the database From 4c635bd43a143554ec3ffec66a09e1415a5ceedb Mon Sep 17 00:00:00 2001 From: michael-grunder Date: Sat, 17 Jan 2015 13:14:52 -0800 Subject: [PATCH 0115/2314] Update phpredis location and authors --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 1860a3effa..1459dac963 100644 --- a/clients.json +++ b/clients.json @@ -375,9 +375,9 @@ { "name": "phpredis", "language": "PHP", - "repository": "https://github.com/nicolasff/phpredis", + "repository": "https://github.com/phpredis/phpredis.git", "description": "This is a client written in C as a PHP module.", - "authors": ["yowgi"], + "authors": ["grumi78", "yowgi"], "recommended": true, "active": true }, From d65972c18035988c92c9124557047aad78c167e6 Mon Sep 17 00:00:00 2001 From: Nicholas Hrynuik Date: Sun, 18 Jan 2015 14:47:31 -0500 Subject: [PATCH 0116/2314] Fix 2 typos in data-types-intro --- topics/data-types-intro.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index e878734f27..548e02e531 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -650,8 +650,8 @@ ideal. So to start, we can make a copy of the set stored in the `deck` key into the `game:1:deck` key. This is accomplished using `SUNIONSTORE`, which normally performs the -intersection between multiple sets, and stores the result into another set. -However, since the intersection of a single set is itself, I can copy my deck +union between multiple sets, and stores the result into another set. 
+However, since the union of a single set is itself, I can copy my deck with: > sunionstore game:1:deck deck From 776066d9ada210b9127f1bb11dc0fd164e69d8a4 Mon Sep 17 00:00:00 2001 From: zensh Date: Sat, 24 Jan 2015 17:57:13 +0800 Subject: [PATCH 0117/2314] add Node.js redis client: thunk-redis. --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 1860a3effa..dd00664029 100644 --- a/clients.json +++ b/clients.json @@ -663,6 +663,16 @@ "active": true }, + { + "name": "thunk-redis", + "language": "Node.js", + "repository": "https://github.com/thunks/thunk-redis", + "description": "A redis client with pipelining, rely on thunks, support promise.", + "authors": ["zensh"], + "recommended": true, + "active": true + }, + { "name": "spade", "language": "Node.js", From 40708f594de39dd5b93e5086a3c769cb44187a0c Mon Sep 17 00:00:00 2001 From: Keyvan Hedayati Date: Sat, 24 Jan 2015 13:34:49 +0330 Subject: [PATCH 0118/2314] Added Python implementation to distlock --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index bb7053103a..ff2824c1fd 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -24,6 +24,7 @@ Before to describe the algorithm, here there are a few links at implementations already available, that can be used as a reference. * [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). +* [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). * [Redisson](https://github.com/mrniko/redisson) (Java implementation). 
From 2e6416718640f998ee958e6868766bee27724682 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 24 Jan 2015 17:07:18 +0200 Subject: [PATCH 0119/2314] `SCRIPT` isn't allowed by web-embedded Redis The example doesn't work and returns two errors. --- commands/script exists.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/commands/script exists.md b/commands/script exists.md index 818826fc84..194bebe47b 100644 --- a/commands/script exists.md +++ b/commands/script exists.md @@ -17,10 +17,3 @@ Lua scripting. the specified SHA1 digest arguments. For every corresponding SHA1 digest of a script that actually exists in the script cache, an 1 is returned, otherwise 0 is returned. - -@example - -```cli -SCRIPT LOAD "return 1" -SCRIPT EXISTS e0e1f9fabfc9d4800c877a703b823ac0578ff8db -``` From 83fc3cd76bc48d0d92ac9629bab68bdb97b10bf9 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 24 Jan 2015 17:13:21 +0200 Subject: [PATCH 0120/2314] Fixed typo in ZRANGEBYLEX's "since" attribute --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 98886e0708..ba0af4d547 100644 --- a/commands.json +++ b/commands.json @@ -2236,7 +2236,7 @@ "optional": true } ], - "since": "2.9.9", + "since": "2.8.9", "group": "sorted_set" }, "ZRANGEBYSCORE": { From 43a42d806fb47333806b0faa72b968e469a00bcc Mon Sep 17 00:00:00 2001 From: RickvdP Date: Sun, 25 Jan 2015 01:51:24 +0100 Subject: [PATCH 0121/2314] Sentinel.md typo fixes --- topics/sentinel.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 11a0710daf..e4db5f6d6a 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -403,12 +403,12 @@ disconnected form the master for more than: (down-after-milliseconds * 10) + milliseconds_since_master_is_in_SDOWN_state -Is considered to be not reliable and is discareded at all. +Is considered to be not reliable and is disregarded at all. 
-The slave selection only consider the slaves that passed the above test, +The slave selection only considers the slaves that passed the above test, and sorts it based on the above criteria, in the following order. -1. The slaves are sorted by `slave-priority` as confiugred in the `redis.conf` file of the Redis instance. A lower priority will be preferred. +1. The slaves are sorted by `slave-priority` as configured in the `redis.conf` file of the Redis instance. A lower priority will be preferred. 2. If the priority is the same, the replication offset processed by the slave is checked, and the slave that received more data from the master is selected. 3. If multiple slaves have the same priority and processed the same data from the master, a further check is performed, selecting the slave with the lexicographically smaller run ID. Having a lower run ID is not a real advantage for a slave, but is useful in order to make the process of slave selection more deterministic, instead of resorting to select a random slave. @@ -447,7 +447,7 @@ data only in the master, having the same data accessible in the slaves. However, in the uncommon case where you need a slave that is accessible without authentication, you can still do it by setting up a slave priority -of zero (that will not allow the salve to be promoted to master), and +of zero (that will not allow the slave to be promoted to master), and configuring only the `masterauth` directive for this slave, without the `requirepass` directive, so that data will be readable by unauthenticated clients. 
From d9d75f2836e47406851bda498ef42bae9caad3ab Mon Sep 17 00:00:00 2001 From: Hayk Martirosyan Date: Tue, 27 Jan 2015 20:27:32 -0800 Subject: [PATCH 0122/2314] Add redox to C++ clients in clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 1860a3effa..f5f23873e4 100644 --- a/clients.json +++ b/clients.json @@ -746,6 +746,15 @@ "authors": ["loopole"] }, + { + "name": "redox", + "language": "C++", + "repository": "https://github.com/hmartiro/redox", + "description": "Modern, asynchronous, and fast C++11 client for Redis", + "authors": ["hmartiros"], + "active": true + }, + { "name": "redis3m", "language": "C++", From 28f56bcb82bb70246d48523e7357cf6a47cba7c7 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 28 Jan 2015 13:08:21 +0200 Subject: [PATCH 0123/2314] Added REPLACE modifier to RESTORE --- commands.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/commands.json b/commands.json index 98886e0708..a5ca580773 100644 --- a/commands.json +++ b/commands.json @@ -1398,7 +1398,14 @@ { "name": "serialized-value", "type": "string" + }, + { + "name": "replace", + "type": "enum", + "enum": ["REPLACE"], + "optional": true } + ], "since": "2.6.0", "group": "generic" From 6413d3b68cf72ae1c0eed1a92cdd9d93478cb57a Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 28 Jan 2015 13:10:17 +0200 Subject: [PATCH 0124/2314] Added REPLACE modifier to RESTORE's documentation --- commands/restore.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/commands/restore.md b/commands/restore.md index 2e4b6b5095..cfb0fb2c9b 100644 --- a/commands/restore.md +++ b/commands/restore.md @@ -4,6 +4,9 @@ provided serialized value (obtained via `DUMP`). If `ttl` is 0 the key is created without any expire, otherwise the specified expire time (in milliseconds) is set. +`RESTORE` will return a "Target key name is busy" error when `key` already +exists unless you use the `REPLACE` modifier. 
+ `RESTORE` checks the RDB version and data checksum. If they don't match an error is returned. From d1ee61ed8b059e2b2e16ebe1b0ac98b38fb59b6c Mon Sep 17 00:00:00 2001 From: Kyle Pointer Date: Wed, 28 Jan 2015 13:41:34 -0600 Subject: [PATCH 0125/2314] Fix small apparent typo on pubsub topic. --- topics/pubsub.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/pubsub.md b/topics/pubsub.md index b8234fa3dd..5a0bfb1e16 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -54,7 +54,7 @@ payload. Pub/Sub has no relation to the key space. It was made to not interfere with it on any level, including database numbers. -Publishing on db 10, will be heard on by a subscriber on db 1. +Publishing on db 10, will be heard by a subscriber on db 1. If you need scoping of some kind, prefix the channels with the name of the environment (test, staging, production, ...). From cd42a2f83883c61f4e31fdc8cde59d6d8dfe0f80 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 14:46:55 +0100 Subject: [PATCH 0126/2314] Range of sorted set scores documented in ZADD man page. --- commands/zadd.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/commands/zadd.md b/commands/zadd.md index 78f7a03930..482cf8462d 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -10,6 +10,11 @@ members is created, like if the sorted set was empty. If the key exists but does The score values should be the string representation of a double precision floating point number. `+inf` and `-inf` values are valid values as well. +Range of integer scores that can be expressed precisely +--- + +Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. 
In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are prefectly representable. Larger integers, or fractions, are intenrally represented using the exponential notation, so it is possible that you get only an approximation of the decimal numero, or of the very big integer, that you set as score. + Sorted sets 101 --- From 166a108f5f35484b6f5496d4941b81d5f76c9bff Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Feb 2015 14:50:22 +0100 Subject: [PATCH 0127/2314] ZADD typo fixing. --- commands/zadd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/zadd.md b/commands/zadd.md index 482cf8462d..22a08127fc 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -13,7 +13,7 @@ The score values should be the string representation of a double precision float Range of integer scores that can be expressed precisely --- -Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are prefectly representable. Larger integers, or fractions, are intenrally represented using the exponential notation, so it is possible that you get only an approximation of the decimal numero, or of the very big integer, that you set as score. +Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are prefectly representable. 
Larger integers, or fractions, are internally represented in exponential form, so it is possible that you get only an approximation of the decimal number, or of the very big integer, that you set as score. Sorted sets 101 --- From de6853950150b892f3e0ffbf828ee0c8ae3a263c Mon Sep 17 00:00:00 2001 From: Brian White Date: Fri, 6 Feb 2015 00:21:09 -0500 Subject: [PATCH 0128/2314] Fix typo --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index e878734f27..e99f8fd95a 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -703,7 +703,7 @@ ordered on request, order is a peculiarity of the data structure used to represent sorted sets). They are ordered according to the following rule: * If A and B are two elements with a different score, then A > B if A.score is > B.score. -* If A and B have exactly the same score, than A > B if the A string is lexicographically greater than the B string. A and B strings can't be equal since sorted sets only have unique elements. +* If A and B have exactly the same score, then A > B if the A string is lexicographically greater than the B string. A and B strings can't be equal since sorted sets only have unique elements. Let's start with a simple example, adding a few selected hackers names as sorted set elements, with their year of birth as "score". From ab20d1aaaae1696263b0d19f0a230b59001eb5df Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 6 Feb 2015 09:29:16 +0100 Subject: [PATCH 0129/2314] Link Redlock algorithm form SET / SETNX pages. 
--- commands/set.md | 2 ++ commands/setnx.md | 11 ++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/commands/set.md b/commands/set.md index 9555f84793..e9cbda3fb9 100644 --- a/commands/set.md +++ b/commands/set.md @@ -28,6 +28,8 @@ GET mykey ## Patterns +**Note:** The following pattern is discouraged in favor of [the Redlock algorithm](http://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. + The command `SET resource-name anystring NX EX max-lock-time` is a simple way to implement a locking system with Redis. A client can acquire the lock if the above command returns `OK` (or retry after some time if the command returns Nil), and remove the lock just using `DEL`. diff --git a/commands/setnx.md b/commands/setnx.md index 3c70cab673..8aaad80c4b 100644 --- a/commands/setnx.md +++ b/commands/setnx.md @@ -20,12 +20,13 @@ GET mykey ## Design pattern: Locking with `!SETNX` -**NOTE:** Starting with Redis 2.6.12 it is possible to create a much simpler locking primitive using the `SET` command to acquire the lock, and a simple Lua script to release the lock. The pattern is documented in the `SET` command page. +**Please note that:** -The old `SETNX` based pattern is documented below for historical reasons. +1. The following pattern is discouraged in favor of [the Redlock algorithm](http://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. +2. We document the old pattern anyway because certain existing implementations link to this page as a reference. Moreover it is an interesting example of how Redis commands can be used in order to mount programming primitives. +3. 
Anyway even assuming a single-instance locking primitive, starting with 2.6.12 it is possible to create a much simpler locking primitive, equivalent to the one discussed here, using the `SET` command to acquire the lock, and a simple Lua script to release the lock. The pattern is documented in the `SET` command page. -`SETNX` can be used as a locking primitive. -For example, to acquire the lock of the key `foo`, the client could try the +That said, `SETNX` can be used, and was historically used, as a locking primitive. For example, to acquire the lock of the key `foo`, the client could try the following: ``` @@ -90,7 +91,7 @@ Let's see how C4, our sane client, uses the good algorithm: Note that even if C4 set the key a bit a few seconds in the future this is not a problem. -**Important note**: In order to make this locking algorithm more robust, a +In order to make this locking algorithm more robust, a client holding a lock should always check the timeout didn't expire before unlocking the key with `DEL` because client failures can be complex, not just crashing but also blocking a lot of time against some operations and trying From 62dc9638569e90a601ecd270590bd7e098fd3fb5 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 11 Feb 2015 07:48:39 +0100 Subject: [PATCH 0130/2314] Update client.json with new maintained lettuce repo path --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 1860a3effa..b1b185ce53 100644 --- a/clients.json +++ b/clients.json @@ -799,9 +799,9 @@ { "name": "lettuce", "language": "Java", - "repository": "https://github.com/wg/lettuce", + "repository": "https://github.com/mp911de/lettuce", "description": "Thread-safe client supporting async usage and key/value codecs", - "authors": ["ar3te"] + "authors": ["ar3te", "mp911de"] }, { From 0a43f489fa2fa66b6ae1c19f0b0728170d781427 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Feb 2015 16:46:32 
+0100 Subject: [PATCH 0131/2314] CONFIG SET doc updated to mention units support for maxmemory. --- commands/config set.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/commands/config set.md b/commands/config set.md index 2a3d3372a8..8d0b01a06d 100644 --- a/commands/config set.md +++ b/commands/config set.md @@ -16,10 +16,12 @@ following important differences: [hgcarr22rc]: http://github.com/antirez/redis/raw/2.8/redis.conf -* Where bytes or other quantities are specified, it is not possible to use - the `redis.conf` abbreviated form (10k 2gb ... and so forth), everything - should be specified as a well-formed 64-bit integer, in the base unit of the - configuration directive. +* In most options where bytes or other quantities are specified, it is not + possible to use the `redis.conf` abbreviated form (10k 2gb ... and so forth), + everything should be specified as a well-formed 64-bit integer, in the base + unit of the configuration directive. However since Redis version equal or + greater to 3.2 (currently in alpha stage) it will be possible to use + `CONFIG SET maxmemory` with units like in `redis.conf`. * The save parameter is a single string of space-separated integers. Every pair of integers represent a seconds/modifications threshold. From 362ef2ef9456aeaad9dea634fff457efd56c6d69 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Feb 2015 16:53:53 +0100 Subject: [PATCH 0132/2314] CONFIG SET doc updated again to reflect reality. 
--- commands/config set.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/commands/config set.md b/commands/config set.md index 8d0b01a06d..dd6a936e8e 100644 --- a/commands/config set.md +++ b/commands/config set.md @@ -16,12 +16,12 @@ following important differences: [hgcarr22rc]: http://github.com/antirez/redis/raw/2.8/redis.conf -* In most options where bytes or other quantities are specified, it is not +* In options where bytes or other quantities are specified, it is not possible to use the `redis.conf` abbreviated form (10k 2gb ... and so forth), everything should be specified as a well-formed 64-bit integer, in the base - unit of the configuration directive. However since Redis version equal or - greater to 3.2 (currently in alpha stage) it will be possible to use - `CONFIG SET maxmemory` with units like in `redis.conf`. + unit of the configuration directive. However since Redis version 3.0 or + greater, it is possible to use `CONFIG SET` with memory units for + `maxmemory`, client output buffers, and replication backlog size. * The save parameter is a single string of space-separated integers. Every pair of integers represent a seconds/modifications threshold. 
From e72364bbc5faafc550d0a2d7c0cb85d122239d25 Mon Sep 17 00:00:00 2001 From: topilski Date: Sat, 14 Feb 2015 16:19:54 +0300 Subject: [PATCH 0133/2314] FastoRedis client --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index 97810e602d..67431642dd 100644 --- a/tools.json +++ b/tools.json @@ -297,6 +297,14 @@ "description": "Cross-platform desktop GUI management tool for Redis", "authors": ["u_glide"] }, + { + "name": "FastoRedis", + "language": "C++", + "url": "http://fastoredis.com", + "repository": "https://github.com/fasto/fastoredis", + "description": "Cross-platform Redis, Memcached management tool.", + "authors": [ "atopilski" ], + }, { "name": "Nydus", "language": "Python", From 21dd90211279516992d94c643a84e0363026169a Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 14 Feb 2015 16:54:08 +0100 Subject: [PATCH 0134/2314] ZRANGE documentation improved a bit. --- commands/zrange.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/commands/zrange.md b/commands/zrange.md index 6100597f0d..d45750fd5e 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -11,6 +11,9 @@ They can also be negative numbers indicating offsets from the end of the sorted set, with `-1` being the last element of the sorted set, `-2` the penultimate element and so on. +`start` and `stop` are **inclusive ranges**, so for exmaple `ZRANGE myzset 0 1`` +will return both the first and the second element of the sorted set. + Out of range indexes will not produce an error. If `start` is larger than the largest index in the sorted set, or `start > stop`, an empty list is returned. @@ -27,7 +30,7 @@ array with (value, score) arrays/tuples). @return @array-reply: list of elements in the specified range (optionally with -their scores). +their scores, in case the `WITHSCORES` option is given). 
@examples @@ -39,3 +42,9 @@ ZRANGE myzset 0 -1 ZRANGE myzset 2 3 ZRANGE myzset -2 -1 ``` + +The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_2*, *element_2*, *score_2*, ..., *element_N*, *score_N*. + +```cli +ZRANGE myzset 0 1 WITHSCORES +``` From 991514056971390d959786f1b24531084e7619b4 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 14 Feb 2015 16:55:19 +0100 Subject: [PATCH 0135/2314] Typo fixed in ZRANGE command doc. --- commands/zrange.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/zrange.md b/commands/zrange.md index d45750fd5e..a5354318fd 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -11,7 +11,7 @@ They can also be negative numbers indicating offsets from the end of the sorted set, with `-1` being the last element of the sorted set, `-2` the penultimate element and so on. -`start` and `stop` are **inclusive ranges**, so for exmaple `ZRANGE myzset 0 1`` +`start` and `stop` are **inclusive ranges**, so for example `ZRANGE myzset 0 1`` will return both the first and the second element of the sorted set. Out of range indexes will not produce an error. 
From d925f61ab21ef1997242e068a16ff484959472fa Mon Sep 17 00:00:00 2001 From: Alexandr Date: Sun, 15 Feb 2015 10:14:56 +0300 Subject: [PATCH 0136/2314] Update tools.json --- tools.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools.json b/tools.json index 67431642dd..c45fc81171 100644 --- a/tools.json +++ b/tools.json @@ -303,7 +303,7 @@ "url": "http://fastoredis.com", "repository": "https://github.com/fasto/fastoredis", "description": "Cross-platform Redis, Memcached management tool.", - "authors": [ "atopilski" ], + "authors": [ "topilski" ], }, { "name": "Nydus", From 8f0965994a9dbf3cb3acce592ed5edf5b1f1d59c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Feb 2015 12:14:29 +0100 Subject: [PATCH 0137/2314] Update who is using Redis page with techstacks.io link. --- topics/whos-using-redis.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/whos-using-redis.md b/topics/whos-using-redis.md index 163cf7068f..2bdec6c13b 100644 --- a/topics/whos-using-redis.md +++ b/topics/whos-using-redis.md @@ -41,4 +41,4 @@ A list of well known companies using Redis: -And many others! link policy: we only link major sites, we used to also link to small companies and services but this rapidly became impossible to maintain. +And many others!, techstacks.io maintains a list of popular sites using Redis, the information may not be always updated since many companies change their tech stack during their lifetime, but is an interesting resource. link policy: we only link major sites, we used to also link to small companies and services but this rapidly became impossible to maintain. From c2f914b500620ad0f4b23e322053c8c16fa05a74 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 25 Feb 2015 18:19:32 +0200 Subject: [PATCH 0138/2314] Add Hash to question about max number of elements Since Hashes are using dict they can also have a max of ~2^32 fields. 
--- topics/faq.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/faq.md b/topics/faq.md index 262dceabe2..07c98f50a5 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -123,12 +123,12 @@ start thinking at some way to shard earlier. You can find more information about using multiple Redis instances in the [Partitioning page](/topics/partitioning). -## What is the maximum number of keys a single Redis instance can hold? and what the max number of elements in a List, Set, Sorted Set? +## What is the maximum number of keys a single Redis instance can hold? and what the max number of elements in a Hash, List, Set, Sorted Set? Redis can handle up to 2^32 keys, and was tested in practice to handle at least 250 million of keys per instance. -Every list, set, and sorted set, can hold 2^32 elements. +Every hash, list, set, and sorted set, can hold 2^32 elements. In other words your limit is likely the available memory in your system. From a457691262dfcd4bbf78e9deddd9ad88b16b7756 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Obrok?= Date: Thu, 26 Feb 2015 14:40:05 +0100 Subject: [PATCH 0139/2314] Fix a couple typos in expire.md --- commands/expire.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/expire.md b/commands/expire.md index dd0142da03..1d512b1651 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -129,7 +129,7 @@ key is found to be timed out. Of course this is not enough as there are expired keys that will never be accessed again. -This keys should be expired anyway, so periodically Redis test a few keys at +These keys should be expired anyway, so periodically Redis tests a few keys at random among keys with an expire set. All the keys that are already expired are deleted from the keyspace. 
From 4853f514aa89cdcfaf8a828459b216d2379cb82d Mon Sep 17 00:00:00 2001 From: Andreas Lappe Date: Thu, 26 Feb 2015 17:06:43 +0100 Subject: [PATCH 0140/2314] Add exredis to elixir clients --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 1860a3effa..5bacd89f8c 100644 --- a/clients.json +++ b/clients.json @@ -81,6 +81,15 @@ "active": true }, + { + "name": "exredis", + "language": "Elixir", + "repository": "https://github.com/artemeff/exredis", + "description": "Redis client for Elixir.", + "authors": ["artemeff"], + "active": true + }, + { "name": "redis.fy", "language": "Fancy", From f94a50bf7c124905dd33ad131b9f2e30965fceb7 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 26 Feb 2015 17:02:29 +0000 Subject: [PATCH 0141/2314] Fix tools.json --- tools.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools.json b/tools.json index c45fc81171..15a7a575b8 100644 --- a/tools.json +++ b/tools.json @@ -300,10 +300,10 @@ { "name": "FastoRedis", "language": "C++", - "url": "http://fastoredis.com", + "url": "http://fastoredis.com", "repository": "https://github.com/fasto/fastoredis", "description": "Cross-platform Redis, Memcached management tool.", - "authors": [ "topilski" ], + "authors": ["topilski"] }, { "name": "Nydus", From 754083d08c4c9fbb634360adcb8d2f2096270a81 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 26 Feb 2015 17:06:49 +0000 Subject: [PATCH 0142/2314] Remove .git from some URLs --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index ada193a500..e71fc58a42 100644 --- a/clients.json +++ b/clients.json @@ -30,7 +30,7 @@ { "name": "aleph", "language": "Clojure", - "repository": "https://github.com/ztellman/aleph.git", + "repository": "https://github.com/ztellman/aleph", "description": "Redis client build on top of lamina", "authors":["Zach 
Tellman"], "active": true @@ -384,7 +384,7 @@ { "name": "phpredis", "language": "PHP", - "repository": "https://github.com/phpredis/phpredis.git", + "repository": "https://github.com/phpredis/phpredis", "description": "This is a client written in C as a PHP module.", "authors": ["grumi78", "yowgi"], "recommended": true, From 35125d1e040bb8e308c4815f82c0dfe8579e2fa6 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 26 Feb 2015 17:47:57 +0000 Subject: [PATCH 0143/2314] Resolve conflicts --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index e71fc58a42..ebc6a41766 100644 --- a/clients.json +++ b/clients.json @@ -935,5 +935,13 @@ "repository": "https://github.com/andrew-bn/RedisBoost", "description": "Thread-safe async Redis client. Offers high performance and simple api", "authors": ["bn_andrew"] + }, + { + "name": "redis", + "language": "Dart", + "url": "https://github.com/ra1u/redis-dart", + "description": "Simple and fast client", + "authors": ["Luka Rahne"], + "active": true } ] From 6ce7a63a560db1ceb123063633bf10f4812a9005 Mon Sep 17 00:00:00 2001 From: Alexander Dinu Date: Thu, 26 Feb 2015 23:09:51 +0500 Subject: [PATCH 0144/2314] Add OCaml client --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index ebc6a41766..32b7c6572f 100644 --- a/clients.json +++ b/clients.json @@ -943,5 +943,13 @@ "description": "Simple and fast client", "authors": ["Luka Rahne"], "active": true + }, + + { + "name": "ocaml-redis", + "language": "OCaml", + "repository": "https://github.com/0xffea/ocaml-redis", + "description": "Synchronous and asynchronous (via Lwt) Redis client library in OCaml. 
Provides implementation of cache and mutex helpers.", + "active": true } ] From d1831a9dc42739abd80e7c1d35f40505ac49aaa5 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Thu, 26 Feb 2015 18:24:53 -0300 Subject: [PATCH 0145/2314] Unify credits. --- topics/sponsors.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/topics/sponsors.md b/topics/sponsors.md index a2e94d3aa1..6171a00a52 100644 --- a/topics/sponsors.md +++ b/topics/sponsors.md @@ -21,4 +21,16 @@ Also thanks to the following people or organizations that donated to the Project We are grateful to [Pivotal](http://gopivotal.com), [VMware](http://vmware.com) and to the other companies and people that donated to the Redis project. Thank you. -The Redis.io domain is kindly donated to the project by [I Want My Name](http://iwantmyname.com). +## redis.io + +[Citrusbyte](https://citrusbyte.com) sponsored the creation of the official +Redis logo (designed by [Carlos Prioglio](http://carlosprioglio.com)) and +transferred its copyright to Salvatore Sanfilippo. + +They also sponsored the initial implementation of this site by +[Damian Janowski](https://twitter.com/djanowski) and [Michel +Martens](https://twitter.com/soveran). Damian and Michel remain the current +maintainers. + +The `redis.io` domain is kindly donated to the project by [I Want My +Name](https://iwantmyname.com). From a1a2e051ef7a6e41cc8da86839f4eb8f7e80ae25 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 26 Feb 2015 21:33:31 +0000 Subject: [PATCH 0146/2314] Merge #446 --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 32b7c6572f..dc9bfdebdf 100644 --- a/clients.json +++ b/clients.json @@ -951,5 +951,13 @@ "repository": "https://github.com/0xffea/ocaml-redis", "description": "Synchronous and asynchronous (via Lwt) Redis client library in OCaml. 
Provides implementation of cache and mutex helpers.", "active": true + }, + { + "name": "Nhiredis", + "language": "C#", + "repository": "https://github.com/mhowlett/Nhiredis", + "description": "A lightweight wrapper around the C client hiredis.", + "authors": ["matt_howlett"], + "active": true } ] From a90504dd0726facadbcf79356eea4fa729670d06 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 26 Feb 2015 21:39:15 +0000 Subject: [PATCH 0147/2314] Resolve conflicts --- topics/admin.md | 2 +- topics/benchmarks.md | 8 ++--- topics/clients.md | 2 +- topics/cluster-spec.md | 8 ++--- topics/cluster-tutorial.md | 10 +++--- topics/debugging.md | 2 +- topics/distlock.md | 2 +- topics/internals-vm.md | 2 +- topics/introduction.md | 2 +- topics/latency.md | 71 ++++++++++++++++++++++++++++++++++++++ topics/license.md | 2 +- topics/notifications.md | 2 +- topics/partitioning.md | 4 +-- topics/quickstart.md | 2 +- topics/rdd-1.md | 4 +-- topics/rdd-2.md | 2 +- topics/rdd.md | 2 +- topics/releases.md | 4 +-- topics/replication.md | 2 +- topics/security.md | 2 +- topics/sentinel-old.md | 2 +- topics/sentinel.md | 4 +-- topics/signals.md | 2 +- topics/transactions.md | 2 +- topics/twitter-clone.md | 12 +++---- topics/whos-using-redis.md | 2 +- 26 files changed, 115 insertions(+), 44 deletions(-) diff --git a/topics/admin.md b/topics/admin.md index 1f8ce1c8ff..b7657e711a 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -7,7 +7,7 @@ Every topic is self contained in form of a FAQ. New topics will be created in th Redis setup hints ----------------- -+ We suggest deploying Redis using the **Linux operating system**. Redis is also tested heavily on osx, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are working. ++ We suggest deploying Redis using the **Linux operating system**. 
Redis is also tested heavily on OS X, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are working. + Make sure to set the Linux kernel **overcommit memory setting to 1**. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot or run the command `sysctl vm.overcommit_memory=1` for this to take effect immediately. * Make sure to disable Linux kernel feature *transparent huge pages*, it will affect greatly both memory usage and latency in a negative way. This is accomplished with the following command: `echo never > sys/kernel/mm/transparent_hugepage/enabled`. + Make sure to **setup some swap** in your system (we suggest as much as swap as memory). If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash for out of memory or the Linux kernel OOM killer will kill the Redis process. diff --git a/topics/benchmarks.md b/topics/benchmarks.md index 2a5b80173f..ee31ff86ea 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -104,7 +104,7 @@ Redis pipelining is able to dramatically improve the number of operations per second a server is able do deliver. This is an example of running the benchmark in a Macbook air 11" using a -pipeling of 16 commands: +pipelining of 16 commands: $ redis-benchmark -n 1000000 -t set,get -P 16 -q SET: 403063.28 requests per second @@ -122,7 +122,7 @@ different options. If you plan to compare Redis to something else, then it is important to evaluate the functional and technical differences, and take them in account. -+ Redis is a server: all commands involve network or IPC roundtrips. It is ++ Redis is a server: all commands involve network or IPC round trips. It is meaningless to compare it to embedded data stores such as SQLite, Berkeley DB, Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is primarily in network/protocol management. 
@@ -134,7 +134,7 @@ mildly useful. itself, but rather measure your network (or IPC) latency. To really test Redis, you need multiple connections (like redis-benchmark) and/or to use pipelining to aggregate several commands and/or multiple threads or processes. -+ Redis is an in-memory data store with some optional persistency options. If ++ Redis is an in-memory data store with some optional persistence options. If you plan to compare it to transactional servers (MySQL, PostgreSQL, etc ...), then you should consider activating AOF and decide on a suitable fsync policy. + Redis is a single-threaded server. It is not designed to benefit from @@ -157,7 +157,7 @@ concurrency only (i.e. it creates several connections to the server). It does not use pipelining or any parallelism at all (one pending query per connection at most, and no multi-threading). -To run a benchmark using pipelining mode (and achieve higher throughputs), +To run a benchmark using pipelining mode (and achieve higher throughput), you need to explicitly use the -P option. Please note that it is still a realistic behavior since a lot of Redis based applications actively use pipelining to improve performance. diff --git a/topics/clients.md b/topics/clients.md index 99ae876de7..1c1f0d7227 100644 --- a/topics/clients.md +++ b/topics/clients.md @@ -2,7 +2,7 @@ Redis Clients Handling === This document provides information about how Redis handles clients from the -point of view of the networklayer: connections, timeouts, buffers, and +point of view of the network layer: connections, timeouts, buffers, and other similar topics are covered here. The information contained in this document is **only applicable to Redis version 2.6 or greater**. 
diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index e8fba63c81..64c7133ea4 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -10,7 +10,7 @@ Redis Cluster is a distributed implementation of Redis with the following goals, * Acceptable degree of write safety: the system tries (in a best-effort way) to retain all the writes originating from clients connected with the majority of the master nodes. Usually there are small windows where acknowledged writes can be lost. Windows to lose acknowledged writes are larger when clients are in a minority partition. * Availability: Redis Cluster is able to survive to partitions where the majority of the master nodes are reachable and there is at least a reachable slave for every master node that is no longer reachable. -What is described in this document is implemented in the `unstable` branch of the Github Redis repository. Redis Cluster has now entered the beta stage, so new betas are released every month and can be found in the [download page](http://redis.io/download) of the Redis web site. +What is described in this document is implemented in the `unstable` branch of the GitHub Redis repository. Redis Cluster has now entered the beta stage, so new betas are released every month and can be found in the [download page](http://redis.io/download) of the Redis web site. Implemented subset --- @@ -22,7 +22,7 @@ as long as the keys all belong to the same node. Redis Cluster implements a concept called **hash tags** that can be used in order to force certain keys to be stored in the same node. However during -manual reshardings multi-key operations may become unavailable for some time +manual reshardings, multi-key operations may become unavailable for some time while single keys operations are always available. Redis Cluster does not support multiple databases like the stand alone version @@ -46,7 +46,7 @@ signal specific conditions. 
The cluster bus is also used in order to propagate Pub/Sub messages across the cluster. Since cluster nodes are not able to proxy requests clients may be redirected -to other nodes using redirections errors `-MOVED` and `-ASK`. +to other nodes using redirection errors `-MOVED` and `-ASK`. The client is in theory free to send requests to all the nodes in the cluster, getting redirected if needed, so the client is not required to take the state of the cluster. However clients that are able to cache the map between @@ -770,7 +770,7 @@ replication offset is at rank 0, the second must updated at rank 1, and so forth Once a slave wins the election, it starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` set to the `currentEpoch` at which the election was started. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured). +In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured). The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 03338669b5..4d97e056fa 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -12,7 +12,7 @@ Note that if you plan to run a serious Redis Cluster deployment, the more formal specification is a highly suggested reading. 
**Redis cluster is currently alpha quality code**, please get in touch in the -Redis mailing list or open an issue in the Redis Github repository if you +Redis mailing list or open an issue in the Redis GitHub repository if you find any issue. Redis Cluster 101 @@ -218,7 +218,7 @@ As a template for your configuration file just use the small example above, but make sure to replace the port number `7000` with the right port number according to the directory name. -Now copy your redis-server executable, **compiled from the latest sources in the unstable branch at Github**, into the `cluster-test` directory, and finally open 6 terminal tabs in your favorite terminal application. +Now copy your redis-server executable, **compiled from the latest sources in the unstable branch at GitHub**, into the `cluster-test` directory, and finally open 6 terminal tabs in your favorite terminal application. Start every instance like that, one every tab: @@ -286,7 +286,7 @@ I'm aware of the following implementations: * The popular [Predis](https://github.com/nrk/predis) has support for Redis Cluster, the support was recently updated and is in active development. * The most used Java client, [Jedis](https://github.com/xetorthio/jedis) recently added support for Redis Cluster, see the *Jedis Cluster* section in the project README. * [StackExchange.Redis](https://github.com/StackExchange/StackExchange.Redis) offers support for C# (and should work fine with most .NET languages; VB, F#, etc) -* The `redis-cli` utility in the unstable branch of the Redis repository at Github implements a very basic cluster support when started with the `-c` switch. +* The `redis-cli` utility in the unstable branch of the Redis repository at GitHub implements a very basic cluster support when started with the `-c` switch. An easy way to test Redis Cluster is either to try and of the above clients or simply the `redis-cli` command line utility. 
The following is an example @@ -499,7 +499,7 @@ A more interesting example application --- So far so good, but the example application we used is not very good. -It writes acritically to the cluster without ever checking if what was +It writes simply to the cluster without ever checking if what was written is the right thing. From our point of view the cluster receiving the writes could just always @@ -518,7 +518,7 @@ However instead of just writing, the application does two additional things: What this means is that this application is a simple **consistency checker**, and is able to tell you if the cluster lost some write, or if it accepted -a write that we did not received acknowledgement for. In the first case we'll +a write that we did not received acknowledgment for. In the first case we'll see a counter having a value that is smaller than the one we remember, while in the second case the value will be greater. diff --git a/topics/debugging.md b/topics/debugging.md index dae05dfbf9..8531f687c4 100644 --- a/topics/debugging.md +++ b/topics/debugging.md @@ -71,7 +71,7 @@ In order to attach GDB the first thing you need is the *process ID* of the runni In the above example the process ID is **58414**. + Login into your Redis server. -+ (Optional but recommended) Start **screen** or **tmux** or any other program that will make sure that your GDB session will not be closed if your ssh connection will timeout. If you don't know what screen is do yourself a favour and [Read this article](http://www.linuxjournal.com/article/6340) ++ (Optional but recommended) Start **screen** or **tmux** or any other program that will make sure that your GDB session will not be closed if your ssh connection will timeout. 
If you don't know what screen is do yourself a favor and [Read this article](http://www.linuxjournal.com/article/6340) + Attach GDB to the running Redis server typing: gdb `` `` diff --git a/topics/distlock.md b/topics/distlock.md index 646afb965d..c60456d3a4 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -14,7 +14,7 @@ This page is an attempt to provide a more canonical algorithm to implement distributed locks with Redis. We propose an algorithm, called **Redlock**, which implements a DLM which we believe to be safer than the vanilla single instance approach. We hope that the community will analyze it, provide -feedbacks, and use it as a starting point for the implementations or more +feedback, and use it as a starting point for the implementations or more complex or alternative designs. Implementations diff --git a/topics/internals-vm.md b/topics/internals-vm.md index 32658dad50..3dd2504a1f 100644 --- a/topics/internals-vm.md +++ b/topics/internals-vm.md @@ -42,7 +42,7 @@ This is how the Redis Object structure _robj_ looks like: int refcount; /* VM fields, this are only allocated if VM is active, otherwise the * object allocation function will just allocate - * sizeof(redisObjct) minus sizeof(redisObjectVM), so using + * sizeof(redisObject) minus sizeof(redisObjectVM), so using * Redis without VM active will not have any overhead. */ struct redisObjectVM vm; } robj; diff --git a/topics/introduction.md b/topics/introduction.md index c09caa39aa..c33aeb2b35 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -33,6 +33,6 @@ Other features include: You can use Redis from [most programming languages](/clients) out there. Redis is written in **ANSI C** and works in most POSIX systems like Linux, -\*BSD, OS X without external dependencies. Linux and OSX are the two operating systems where Redis is developed and more tested, and we **recommend using Linux for deploying**. 
Redis may work in Solaris-derived systems like SmartOS, but the support is *best effort*. There +\*BSD, OS X without external dependencies. Linux and OS X are the two operating systems where Redis is developed and more tested, and we **recommend using Linux for deploying**. Redis may work in Solaris-derived systems like SmartOS, but the support is *best effort*. There is no official support for Windows builds, but Microsoft develops and maintains a [Win-64 port of Redis](https://github.com/MSOpenTech/redis). diff --git a/topics/latency.md b/topics/latency.md index c3406a773d..03b9d102ca 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -617,3 +617,74 @@ Note: in the example the **DEBUG SLEEP** command was used in order to block the If you happen to collect multiple watchdog stack traces you are encouraged to send everything to the Redis Google Group: the more traces we obtain, the simpler it will be to understand what the problem with your instance is. +APPENDIX A: Experimenting with huge pages +----------------------------------------- + +Latency introduced by fork can be mitigated using huge pages at the cost of a bigger memory usage during persistence. The following appendix describe in details this feature as implemented in the Linux kernel. + +Some CPUs can use different page size though. AMD and Intel CPUs can support +2 MB page size if needed. These pages are nicknamed *huge pages*. Some +operating systems can optimize page size in real time, transparently +aggregating small pages into huge pages on the fly. + +On Linux, explicit huge pages management has been introduced in 2.6.16, and +implicit transparent huge pages are available starting in 2.6.38. If you +run recent Linux distributions (for example RH 6 or derivatives), transparent +huge pages can be activated, and you can use a vanilla Redis version with them. + +This is the preferred way to experiment/use with huge pages on Linux. 
+ +Now, if you run older distributions (RH 5, SLES 10-11, or derivatives), and +not afraid of a few hacks, Redis requires to be patched in order to support +huge pages. + +The first step would be to read [Mel Gorman's primer on huge pages](http://lwn.net/Articles/374424/) + +There are currently two ways to patch Redis to support huge pages. + ++ For Redis 2.4, the embedded jemalloc allocator must be patched. +[patch](https://gist.github.com/1171054) by Pieter Noordhuis. +Note this patch relies on the anonymous mmap huge page support, +only available starting 2.6.32, so this method cannot be used for older +distributions (RH 5, SLES 10, and derivatives). + ++ For Redis 2.2, or 2.4 with the libc allocator, Redis makefile +must be altered to link Redis with +[the libhugetlbfs library](http://libhugetlbfs.sourceforge.net/). +It is a straightforward [change](https://gist.github.com/1240452) + +Then, the system must be configured to support huge pages. + +The following command allocates and makes N huge pages available: + + $ sudo sysctl -w vm.nr_hugepages= + +The following command mounts the huge page filesystem: + + $ sudo mount -t hugetlbfs none /mnt/hugetlbfs + +In all cases, once Redis is running with huge pages (transparent or +not), the following benefits are expected: + ++ The latency due to the fork operations is dramatically reduced. + This is mostly useful for very large instances, and especially + on a VM. ++ Redis is faster due to the fact the translation look-aside buffer + (TLB) of the CPU is more efficient to cache page table entries + (i.e. the hit ratio is better). Do not expect miracle, it is only + a few percent gain at most. ++ Redis memory cannot be swapped out anymore, which is interesting + to avoid outstanding latencies due to virtual memory. + +Unfortunately, and on top of the extra operational complexity, +there is also a significant drawback of running Redis with +huge pages. 
The COW mechanism granularity is the page. With +2 MB pages, the probability a page is modified during a background +save operation is 512 times higher than with 4 KB pages. The actual +memory required for a background save therefore increases a lot, +especially if the write traffic is truly random, with poor locality. +With huge pages, using twice the memory while saving is not anymore +a theoretical incident. It really happens. + +The result of a complete benchmark can be found +[here](https://gist.github.com/1272254). diff --git a/topics/license.md b/topics/license.md index 5b1c2cdec1..fcec35880c 100644 --- a/topics/license.md +++ b/topics/license.md @@ -37,7 +37,7 @@ POSSIBILITY OF SUCH DAMAGE. # Third party files and licenses -Redis uses source code from third parties. All this code contians a BSD or BSD-compatible license. The following is a list of third party files and information about their copyright. +Redis uses source code from third parties. All this code contains a BSD or BSD-compatible license. The following is a list of third party files and information about their copyright. * Redis uses the [LHF compression library](http://oldhome.schmorp.de/marc/liblzf.html). LibLZF is copyright Marc Alexander Lehmann and is released under the terms of the **two clause BSD license**. diff --git a/topics/notifications.md b/topics/notifications.md index 05662eec6f..6732044458 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -159,4 +159,4 @@ The `expired` events are generated when a key is accessed and is found to be exp If no command targets the key constantly, and there are many keys with a TTL associated, there can be a significant delay between the time the key time to live drops to zero, and the time the `expired` event is generated. -Basically `expired` events **are generated when the Redis server deletes the key** and not when the time to live theorically reaches the value of zero. 
+Basically `expired` events **are generated when the Redis server deletes the key** and not when the time to live theoretically reaches the value of zero. diff --git a/topics/partitioning.md b/topics/partitioning.md index 0abdb62006..87bb942636 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -43,9 +43,9 @@ Some features of Redis don't play very well with partitioning: * Operations involving multiple keys are usually not supported. For instance you can't perform the intersection between two sets if they are stored in keys that are mapped to different Redis instances (actually there are ways to do this, but not directly). * Redis transactions involving multiple keys can not be used. -* The partitioning granuliary is the key, so it is not possible to shard a dataset with a single huge key like a very big sorted set. +* The partitioning granularity is the key, so it is not possible to shard a dataset with a single huge key like a very big sorted set. * When partitioning is used, data handling is more complex, for instance you have to handle multiple RDB / AOF files, and to make a backup of your data you need to aggregate the persistence files from multiple instances and hosts. -* Adding and removing capacity can be complex. For instance Redis Cluster supports mostly transparent rebalancing of data with the ability to add and remove nodes at runtime, but other systems like client side partitioning and proxies don't support this feature. However a technique called *Presharding* helps in this regard. +* Adding and removing capacity can be complex. For instance Redis Cluster supports mostly transparent rebalancing of data with the ability to add and remove nodes at runtime, but other systems like client side partitioning and proxies don't support this feature. However a technique called *Pre-sharding* helps in this regard. Data store or cache? 
--- diff --git a/topics/quickstart.md b/topics/quickstart.md index b4b87fe328..6c09719db4 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -117,7 +117,7 @@ commands calling methods. A short interactive example using Ruby: Redis persistence ================= -You can learn [how Redis persisence works on this page](http://redis.io/topics/persistence), however what is important to understand for a quick start is that by default, if you start Redis with the default configuration, Redis will spontaneously save the dataset only from time to time (for instance after at least five minutes if you have at least 100 changes in your data), so if you want your database to persist and be reloaded after a restart make sure to call the **SAVE** command manually every time you want to force a data set snapshot. Otherwise make sure to shutdown the database using the **SHUTDOWN** command: +You can learn [how Redis persistence works on this page](http://redis.io/topics/persistence), however what is important to understand for a quick start is that by default, if you start Redis with the default configuration, Redis will spontaneously save the dataset only from time to time (for instance after at least five minutes if you have at least 100 changes in your data), so if you want your database to persist and be reloaded after a restart make sure to call the **SAVE** command manually every time you want to force a data set snapshot. Otherwise make sure to shutdown the database using the **SHUTDOWN** command: $ redis-cli shutdown diff --git a/topics/rdd-1.md b/topics/rdd-1.md index 7daa5ec92b..0ea8402aa3 100644 --- a/topics/rdd-1.md +++ b/topics/rdd-1.md @@ -12,9 +12,9 @@ Redis Design Drafts are a way to make the community aware of designs planned in order to modify or evolve Redis. Every new Redis Design Draft is published in the Redis mailing list and announced on Twitter, in the hope to receive -feedbacks before implementing a given feature. 
+feedback before implementing a given feature. -The way the community can provide feedbacks about a RDD is simply writing +The way the community can provide feedback about a RDD is simply writing a message to the Redis mailing list, or commenting in the associated Github issue if any. diff --git a/topics/rdd-2.md b/topics/rdd-2.md index 15a464abf9..4c85b50207 100644 --- a/topics/rdd-2.md +++ b/topics/rdd-2.md @@ -22,7 +22,7 @@ Also with minimal changes it will be possible to add RDB version 7 support to Redis 2.6 without actually supporting the additional fields but just skipping them when loading an RDB file. -RDB info fields may have semantical meaning if needed, so that the presence +RDB info fields may have semantic meaning if needed, so that the presence of the field may add information about the data set specified in the RDB file format, however when an info field is required to be correctly decoded in order to understand and load the data set content of the RDB file, the diff --git a/topics/rdd.md b/topics/rdd.md index 15f1238bfd..f674462af4 100644 --- a/topics/rdd.md +++ b/topics/rdd.md @@ -3,7 +3,7 @@ Redis Design Drafts are a way to make the community aware about the design of new features before this feature is actually implemented. This is done in the -hope to get good feedbacks from the user base, that may result in a change +hope to get good feedback from the user base, that may result in a change of the design if a flaw or possible improvement was discovered. The following is the list of published RDDs so far: diff --git a/topics/releases.md b/topics/releases.md index 38972fdccc..a6c78d9086 100644 --- a/topics/releases.md +++ b/topics/releases.md @@ -43,7 +43,7 @@ was forked into the `2.8` branch. This new branch can be at three different levels of stability: development, frozen, and release candidate. -* Development: new features and bug fixes are committed into the branch, but not everything going into `unstable` is merged here. 
Only the features that can become stable in a reasonable timeframe are merged. +* Development: new features and bug fixes are committed into the branch, but not everything going into `unstable` is merged here. Only the features that can become stable in a reasonable time frame are merged. * Frozen: no new feature is added, unless it is almost guaranteed to have zero stability impacts on the source code, and at the same time for some reason it is a very important feature that must be shipped ASAP. Big code changes are only allowed when they are needed in order to fix bugs. * Release Candidate: only fixes are committed against this release. @@ -64,7 +64,7 @@ Stable releases follow the usual `major.minor.patch` versioning schema, with the * The minor is even in stable versions of Redis. * The minor is odd in unstable, development, frozen, release candidates. For instance the unstable version of 2.8.x will have a version number in the form 2.7.x. In general the unstable version of x.y.z will have a version x.(y-1).z. -* As an unstable version of Redis progresses, the patchlevel is incremented from time to time, so at a given time you may have 2.7.2, and later 2.7.3 and so forth. However when the release candidate state is reached, the patchlevel starts from 101. So for instance 2.7.101 is the first release candidate for 2.8, 2.7.105 is Release Candidate 5, and so forth. +* As an unstable version of Redis progresses, the patch level is incremented from time to time, so at a given time you may have 2.7.2, and later 2.7.3 and so forth. However when the release candidate state is reached, the patch level starts from 101. So for instance 2.7.101 is the first release candidate for 2.8, 2.7.105 is Release Candidate 5, and so forth. Support --- diff --git a/topics/replication.md b/topics/replication.md index 3558bc7154..09b1aaf829 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -50,7 +50,7 @@ is wiped from the master and all its slaves: 2. 
A crashes, however it has some auto-restart system, that restarts the process. However since persistence is turned off, the node restarts with an empty data set. 3. Nodes B and C will replicate from A, which is empty, so they'll effectively destroy their copy of the data. -When Redis Sentinel is used for high availability, also turning off persistency +When Redis Sentinel is used for high availability, also turning off persistence on the master, together with auto restart of the process, is dangerous. For example the master can restart fast enough for Sentinel to don't detect a failure, so that the failure mode described above happens. Every time data safety is important, and replication is used with master configured without persistence, auto restart of instances should be disabled. diff --git a/topics/security.md b/topics/security.md index d0a81cb433..777c9bff58 100644 --- a/topics/security.md +++ b/topics/security.md @@ -6,7 +6,7 @@ view of Redis: the access control provided by Redis, code security concerns, attacks that can be triggered from the outside by selecting malicious inputs and other similar topics are covered. -For security related contacts please open an issue on Github, or when you feel it +For security related contacts please open an issue on GitHub, or when you feel it is really important that the security of the communication is preserved, use the GPG key at the end of this document. diff --git a/topics/sentinel-old.md b/topics/sentinel-old.md index 48a318e84e..22913529c7 100644 --- a/topics/sentinel-old.md +++ b/topics/sentinel-old.md @@ -113,7 +113,7 @@ concepts of *being down*, one is called a *Subjectively Down* condition (SDOWN) and is a down condition that is local to a given Sentinel instance. 
Another is called *Objectively Down* condition (ODOWN) and is reached when enough Sentinels (at least the number configured as the `quorum` parameter -of the monitored master) have an SDOWN condition, and get feedbacks from +of the monitored master) have an SDOWN condition, and get feedback from other Sentinels using the `SENTINEL is-master-down-by-addr` command. From the point of view of a Sentinel an SDOWN condition is reached if we diff --git a/topics/sentinel.md b/topics/sentinel.md index 8841fad349..8e1247b318 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -208,7 +208,7 @@ using Redis Pub/Sub messages, both in the master and all the slaves. At the same time all the Sentinels wait for messages to see what is the configuration advertised by the other Sentinels. -Configurations are broadcasted in the `__sentinel__:hello` Pub/Sub channel. +Configurations are broadcast in the `__sentinel__:hello` Pub/Sub channel. Because every configuration has a different version number, the greater version always wins over smaller versions. @@ -237,7 +237,7 @@ concepts of *being down*, one is called a *Subjectively Down* condition (SDOWN) and is a down condition that is local to a given Sentinel instance. Another is called *Objectively Down* condition (ODOWN) and is reached when enough Sentinels (at least the number configured as the `quorum` parameter -of the monitored master) have an SDOWN condition, and get feedbacks from +of the monitored master) have an SDOWN condition, and get feedback from other Sentinels using the `SENTINEL is-master-down-by-addr` command. From the point of view of a Sentinel an SDOWN condition is reached if we diff --git a/topics/signals.md b/topics/signals.md index c7785c6086..e5764765fa 100644 --- a/topics/signals.md +++ b/topics/signals.md @@ -2,7 +2,7 @@ Redis Signals Handling === This document provides information about how Redis reacts to the reception -of differe POSIX signals such as `SIGTERM`, `SIGSEGV` and so forth. 
+of different POSIX signals such as `SIGTERM`, `SIGSEGV` and so forth. The information contained in this document is **only applicable to Redis version 2.6 or greater**. diff --git a/topics/transactions.md b/topics/transactions.md index fc9df00b78..dd921a2a22 100644 --- a/topics/transactions.md +++ b/topics/transactions.md @@ -71,7 +71,7 @@ During a transaction it is possible to encounter two kind of command errors: Clients used to sense the first kind of errors, happening before the `EXEC` call, by checking the return value of the queued command: if the command replies with QUEUED it was queued correctly, otherwise Redis returns an error. If there is an error while queueing a command, most clients will abort the transaction discarding it. -However starting with Redis 2.6.5, the server will remember that there was an error during the accumulation of commands, and will refuse to execute the transaction returning also an error during `EXEC`, and discarding the transcation automatically. +However starting with Redis 2.6.5, the server will remember that there was an error during the accumulation of commands, and will refuse to execute the transaction returning also an error during `EXEC`, and discarding the transaction automatically. Before Redis 2.6.5 the behavior was to execute the transaction with just the subset of commands queued successfully in case the client called `EXEC` regardless of previous errors. The new behavior makes it much more simple to mix transactions with pipelining, so that the whole transaction can be sent at once, reading all the replies later at once. diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index 22c50cbab8..01a1386a81 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -10,7 +10,7 @@ in order to learn how to create more complex applications. Note: the original version of this article was written in 2009 when Redis was released. 
It was not exactly clear at the time that the Redis data model was suitable to write entire applications. Now after 5 years there are many cases of -applications using Redis as their main store, so the gaol of the article today +applications using Redis as their main store, so the goal of the article today is to be a tutorial for Redis newcomers. You'll learn how to design a simple data layout using Redis, and how to apply different data structures. @@ -18,7 +18,7 @@ Our Twitter clone, called [Retwis](http://retwis.antirez.com), is structurally s I use PHP for the example since it can be read by everybody. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. A few clones exist (however not all the clones use the same data layout as the -current version of this toturial, so please, stick with the official PHP +current version of this tutorial, so please, stick with the official PHP implementation for the sake of following the article better). * [Retwis-RB](http://retwisrb.danlucraft.com/) is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft! Full source code is included of course, and a link to its Git repository appears in the footer of this article. The rest of this article targets PHP, but Ruby programmers can also check the Retwis-RB source code since it's conceptually very similar. @@ -133,7 +133,7 @@ of Sorted Sets usage: ZADD zset 12.55 c ZRANGE zset 0 -1 => b,a,c -In the above example we added a few elements with `ZADD`, and later retrivied +In the above example we added a few elements with `ZADD`, and later retrieved the elements with `ZRANGE`. As you can see the elements are returned in order according to their score. 
In order to check if a given element exists, and also to retrieve its score if it exists, we use the `ZSCORE` command: @@ -142,7 +142,7 @@ also to retrieve its score if it exists, we use the `ZSCORE` command: ZSCORE zset non_existing_element => NULL Sorted Sets are a very powerful data structure, you can query elements by -score range, lexocographically, in reverse order, and so forth. +score range, lexicographically, in reverse order, and so forth. To know more [please check the Sorted Set sections in the official Redis commands documentation](http://redis.io/commands/#sorted_set). The Hash data type @@ -156,7 +156,7 @@ collection of fields associated with values: HMSET myuser name Salvatore surname Sanfilippo country Italy HGET myuser surname => Sanfilippo -`HMSET` can be used to set fields in the hash, that can be retrivied with +`HMSET` can be used to set fields in the hash, that can be retrieved with `HGET` later. It is possible to check if a field exists with `HEXISTS`, or to increment an hash field with `HINCRBY` and so forth. @@ -186,7 +186,7 @@ Let's start with Users. We need to represent users, of course, with their userna *Note: you should use an hashed password in a real application, for simplicity we store the password in clear text.* -We use the `next_user_id` key in order to always get an unique ID for every new user. Then we use this unique ID to name the key holdign an Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. +We use the `next_user_id` key in order to always get an unique ID for every new user. Then we use this unique ID to name the key holding an Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. Besides the fields already defined, we need some more stuff in order to fully define a User. 
For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add an user, we also populate the `users` key, which is an Hash, with the username as field, and its ID as value. HSET users antirez 1000 diff --git a/topics/whos-using-redis.md b/topics/whos-using-redis.md index 2bdec6c13b..825a19387e 100644 --- a/topics/whos-using-redis.md +++ b/topics/whos-using-redis.md @@ -9,7 +9,7 @@ A list of well known companies using Redis:
  • - Github + GitHub
  • From 43d53c6c47d716914851c2732bc53580ea0be77c Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Thu, 26 Feb 2015 21:47:46 +0000 Subject: [PATCH 0148/2314] Resolve conflicts --- commands/bitpos.md | 2 +- commands/client kill.md | 2 +- commands/config rewrite.md | 2 +- commands/dump.md | 2 +- commands/eval.md | 2 +- commands/pfmerge.md | 2 +- commands/role.md | 2 +- commands/set.md | 2 +- commands/srandmember.md | 2 +- commands/zremrangebylex.md | 2 +- topics/debugging.md | 6 +++--- topics/internals-vm.md | 2 +- topics/latency-monitor.md | 6 +++--- topics/notifications.md | 6 +++--- topics/problems.md | 2 +- topics/rdd-2.md | 4 ++-- topics/sentinel-clients.md | 2 +- topics/sentinel-old.md | 4 ++-- topics/sentinel.md | 8 ++++---- 19 files changed, 30 insertions(+), 30 deletions(-) diff --git a/commands/bitpos.md b/commands/bitpos.md index 57c4bf3dd5..85f406e5b0 100644 --- a/commands/bitpos.md +++ b/commands/bitpos.md @@ -7,7 +7,7 @@ byte's most significant bit is at position 8, and so forth. The same bit position convention is followed by `GETBIT` and `SETBIT`. By default, all the bytes contained in the string are examined. -It is possible to look for bits only in a specified interval passing the additional arguments _start_ and _end_ (it is possible to just pass _start_, the operation will assume that the end is the last byte of the string. However there are semantical differences as explained later). The range is interpreted as a range of bytes and not a range of bits, so `start=0` and `end=2` means to look at the first three bytes. +It is possible to look for bits only in a specified interval passing the additional arguments _start_ and _end_ (it is possible to just pass _start_, the operation will assume that the end is the last byte of the string. However there are semantic differences as explained later). The range is interpreted as a range of bytes and not a range of bits, so `start=0` and `end=2` means to look at the first three bytes. 
Note that bit positions are returned always as absolute values starting from bit zero even when _start_ and _end_ are used to specify a range. diff --git a/commands/client kill.md b/commands/client kill.md index ec3ccd94c9..6e6290fc43 100644 --- a/commands/client kill.md +++ b/commands/client kill.md @@ -34,7 +34,7 @@ its configuration. ## Notes -Due to the single-treaded nature of Redis, it is not possible to +Due to the single-threaded nature of Redis, it is not possible to kill a client connection while it is executing a command. From the client point of view, the connection can never be closed in the middle of the execution of a command. However, the client diff --git a/commands/config rewrite.md b/commands/config rewrite.md index 4e18bc261b..5eb0952027 100644 --- a/commands/config rewrite.md +++ b/commands/config rewrite.md @@ -12,7 +12,7 @@ CONFIG REWRITE is also able to rewrite the configuration file from scratch if th ## Atomic rewrite process -In order to make sure the redis.conf file is always consistent, that is, on errors or crashes you always end with the old file, or the new one, the rewrite is perforemd with a single `write(2)` call that has enough content to be at least as big as the old file. Sometimes additional padding in the form of comments is added in order to make sure the resulting file is big enough, and later the file gets truncated to remove the padding at the end. +In order to make sure the redis.conf file is always consistent, that is, on errors or crashes you always end with the old file, or the new one, the rewrite is performed with a single `write(2)` call that has enough content to be at least as big as the old file. Sometimes additional padding in the form of comments is added in order to make sure the resulting file is big enough, and later the file gets truncated to remove the padding at the end. 
@return diff --git a/commands/dump.md b/commands/dump.md index 8507a86c53..d74003314e 100644 --- a/commands/dump.md +++ b/commands/dump.md @@ -4,7 +4,7 @@ The returned value can be synthesized back into a Redis key using the `RESTORE` command. The serialization format is opaque and non-standard, however it has a few -semantical characteristics: +semantic characteristics: * It contains a 64-bit checksum that is used to make sure errors will be detected. diff --git a/commands/eval.md b/commands/eval.md index 763f6a57b0..a25096b12d 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -496,7 +496,7 @@ Starting from Redis 2.8.12 the database selected by the Lua script only affects the execution of the script itself, but does not modify the database selected by the client calling the script. -The semantical change between patch level releases was needed since the old +The semantic change between patch level releases was needed since the old behavior was inherently incompatible with the Redis replication layer and was the cause of bugs. diff --git a/commands/pfmerge.md b/commands/pfmerge.md index e1e0199cf1..38500b55d7 100644 --- a/commands/pfmerge.md +++ b/commands/pfmerge.md @@ -3,7 +3,7 @@ the cardinality of the union of the observed Sets of the source HyperLogLog structures. The computed merged HyperLogLog is set to the destination variable, which is -created if does not exist (defauling to an empty HyperLogLog). +created if does not exist (defaulting to an empty HyperLogLog). @return diff --git a/commands/role.md b/commands/role.md index 5b0ef581a6..ce77d09f0e 100644 --- a/commands/role.md +++ b/commands/role.md @@ -1,4 +1,4 @@ -Provide information on the role of a Redis instance in the context of replication, by returing if the instance is currently a `master`, `slave`, or `sentinel`. The command also returns additional information about the state of the replication (if the role is master or slave) or the list of monitored master names (if the role is sentinel). 
+Provide information on the role of a Redis instance in the context of replication, by returning if the instance is currently a `master`, `slave`, or `sentinel`. The command also returns additional information about the state of the replication (if the role is master or slave) or the list of monitored master names (if the role is sentinel). ## Output format diff --git a/commands/set.md b/commands/set.md index e9cbda3fb9..2a27498382 100644 --- a/commands/set.md +++ b/commands/set.md @@ -17,7 +17,7 @@ Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, it @return @simple-string-reply: `OK` if `SET` was executed correctly. -@nil-reply: a Null Bulk Reply is returned if the `SET` operation was not performed becase the user specified the `NX` or `XX` option but the condition was not met. +@nil-reply: a Null Bulk Reply is returned if the `SET` operation was not performed because the user specified the `NX` or `XX` option but the condition was not met. @examples diff --git a/commands/srandmember.md b/commands/srandmember.md index 53bd03f76e..df2d960015 100644 --- a/commands/srandmember.md +++ b/commands/srandmember.md @@ -1,6 +1,6 @@ When called with just the `key` argument, return a random element from the set value stored at `key`. -Starting from Redis version 2.6, when called with the additional `count` argument, return an array of `count` **distinct elements** if `count` is positive. If called with a negative `count` the behavior changes and the command is allowed to return the **same element multiple times**. In this case the numer of returned elements is the absolute value of the specified `count`. +Starting from Redis version 2.6, when called with the additional `count` argument, return an array of `count` **distinct elements** if `count` is positive. If called with a negative `count` the behavior changes and the command is allowed to return the **same element multiple times**. 
In this case the number of returned elements is the absolute value of the specified `count`. When called with just the key argument, the operation is similar to `SPOP`, however while `SPOP` also removes the randomly selected element from the set, `SRANDMEMBER` will just return a random element without altering the original set in any way. diff --git a/commands/zremrangebylex.md b/commands/zremrangebylex.md index 19c9639da5..4098f1e4c2 100644 --- a/commands/zremrangebylex.md +++ b/commands/zremrangebylex.md @@ -1,6 +1,6 @@ When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command removes all elements in the sorted set stored at `key` between the lexicographical range specified by `min` and `max`. -The meaining of `min` and `max` are the same of the `ZRANGEBYLEX` command. Similarly, this command actually returns the same elements that `ZRANGEBYLEX` would return if called with the same `min` and `max` arguments. +The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command. Similarly, this command actually returns the same elements that `ZRANGEBYLEX` would return if called with the same `min` and `max` arguments. @return diff --git a/topics/debugging.md b/topics/debugging.md index 8531f687c4..dbfacd0f66 100644 --- a/topics/debugging.md +++ b/topics/debugging.md @@ -11,7 +11,7 @@ sometimes looking at the crash report is not enough, nor it is possible for the Redis core team to reproduce the issue independently: in this scenario we need help from the user that is able to reproduce the issue. -This little guide shows how to use GDB to provide all the informations the +This little guide shows how to use GDB to provide all the information the Redis developers will need to track the bug more easily. What is GDB? @@ -19,7 +19,7 @@ What is GDB? GDB is the Gnu Debugger: a program that is able to inspect the internal state of another program. 
Usually tracking and fixing a bug is an exercise in -gathering more informations about the state of the program at the moment the +gathering more information about the state of the program at the moment the bug happens, so GDB is an extremely useful tool. GDB can be used in two ways: @@ -177,7 +177,7 @@ Finally you can send everything to the Redis core team: + The Redis executable you are using. + The stack trace produced by the **bt** command, and the registers dump. + The core file you generated with gdb. -+ Informations about the operating system and GCC version, and Redis version you are using. ++ Information about the operating system and GCC version, and Redis version you are using. Thank you --------- diff --git a/topics/internals-vm.md b/topics/internals-vm.md index 3dd2504a1f..5e96d095b5 100644 --- a/topics/internals-vm.md +++ b/topics/internals-vm.md @@ -191,7 +191,7 @@ Threaded VM --- There are basically three main ways to turn the blocking VM into a non blocking one. -* 1: One way is obvious, and in my opinion, not a good idea at all, that is, turning Redis itself into a theaded server: if every request is served by a different thread automatically other clients don't need to wait for blocked ones. Redis is fast, exports atomic operations, has no locks, and is just 10k lines of code, *because* it is single threaded, so this was not an option for me. +* 1: One way is obvious, and in my opinion, not a good idea at all, that is, turning Redis itself into a threaded server: if every request is served by a different thread automatically other clients don't need to wait for blocked ones. Redis is fast, exports atomic operations, has no locks, and is just 10k lines of code, *because* it is single threaded, so this was not an option for me. * 2: Using non-blocking I/O against the swap file. After all you can think Redis already event-loop based, why don't just handle disk I/O in a non-blocking fashion? 
I also discarded this possibility because of two main reasons. One is that non blocking file operations, unlike sockets, are an incompatibility nightmare. It's not just like calling select, you need to use OS-specific things. The other problem is that the I/O is just one part of the time consumed to handle VM, another big part is the CPU used in order to encode/decode data to/from the swap file. This is I picked option three, that is... * 3: Using I/O threads, that is, a pool of threads handling the swap I/O operations. This is what the Redis VM is using, so let's detail how this works. diff --git a/topics/latency-monitor.md b/topics/latency-monitor.md index 753c38c9dd..a2dc6d0fc5 100644 --- a/topics/latency-monitor.md +++ b/topics/latency-monitor.md @@ -168,7 +168,7 @@ minutes, hours or days ago the event happened. For example "15s" means that the first graphed event happened 15 seconds ago. The graph is normalized in the min-max scale so that the zero (the underscore -in the lower row) is the minumum, and a # in the higher row is the maximum. +in the lower row) is the minimum, and a # in the higher row is the maximum. The graph subcommand is useful in order to get a quick idea about the trend of a given latency event without using additional tooling, and without the @@ -180,8 +180,8 @@ LATENCY DOCTOR The `LATENCY DOCTOR` command is the most powerful analysis tool in the latency monitoring, and is able to provide additional statistical data like the average period between latency spikes, the median deviation, and an human readable -analysis of the event. For certain events, like `fork`, additional informations -are provided, like the rate at which the system forks processes. +analysis of the event. For certain events, like `fork`, additional information +is provided, like the rate at which the system forks processes. This is the output you should post in the Redis mailing list if you are looking for help about Latency related issues. 
diff --git a/topics/notifications.md b/topics/notifications.md index 6732044458..0a392f35e2 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -41,7 +41,7 @@ the delivering of two messages, exactly equivalent to the following two PUBLISH __keyevent@0__:del mykey It is easy to see how one channel allows to listen to all the events targeting -the key `mykey` and the other channel allows to obtain informations about +the key `mykey` and the other channel allows to obtain information about all the keys that are target of a `del` operation. The first kind of event, with `keyspace` prefix in the channel is called @@ -120,13 +120,13 @@ Different commands generate different kind of events according to the following * `SREM` generates a single `srem` event, and an additional `del` event if the resulting set is empty and the key is removed. * `SMOVE` generates an `srem` event for the source key, and an `sadd` event for the destination key. * `SPOP` generates an `spop` event, and an additional `del` event if the resulting set is empty and the key is removed. -* `SINTERSTORE`, `SUNIONSTORE`, `SDIFFSTORE` generate `sinterstore`, `sunionostore`, `sdiffstore` events respectively. In the speical case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. +* `SINTERSTORE`, `SUNIONSTORE`, `SDIFFSTORE` generate `sinterstore`, `sunionostore`, `sdiffstore` events respectively. In the special case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. * `ZINCR` generates a `zincr` event. * `ZADD` generates a single `zadd` event even when multiple elements are added. * `ZREM` generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. * `ZREMBYSCORE` generates a single `zrembyscore` event. 
When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. * `ZREMBYRANK` generates a single `zrembyrank` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. -* `ZINTERSTORE` and `ZUNIONSTORE` respectively generate `zinterstore` and `zunionstore` events. In the speical case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. +* `ZINTERSTORE` and `ZUNIONSTORE` respectively generate `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. * Every time a key with a time to live associated is removed from the data set because it expired, an `expired` event is generated. * Every time a key is evicted from the data set in order to free memory as a result of the `maxmemory` policy, an `evicted` event is generated. diff --git a/topics/problems.md b/topics/problems.md index 43d1f8f57f..05e9183617 100644 --- a/topics/problems.md +++ b/topics/problems.md @@ -4,7 +4,7 @@ Problems with Redis? This is a good starting point. This page tries to help you about what to do if you have issues with Redis. Part of the Redis project is helping people that are experiencing problems because we don't like to let people alone with their issues. * If you have **latency problems** with Redis, that in some way appears to be idle for some time, read our [Redis latency troubleshooting guide](/topics/latency). -* Redis stable releases are usually very reliable, however in the rare event you are **experiencing crashes** the developers can help a lot more if you provide debugging informations. Please read our [Debugging Redis guide](/topics/debugging). 
+* Redis stable releases are usually very reliable, however in the rare event you are **experiencing crashes** the developers can help a lot more if you provide debugging information. Please read our [Debugging Redis guide](/topics/debugging). * We have a long history of users experiencing crashes with Redis that actually turned out to be servers with **broken RAM**. Please test your RAM using **redis-server --test-memory** in case Redis is not stable in your system. Redis built-in memory test is fast and reasonably reliable, but if you can you should reboot your server and use [memtest86](http://memtest86.com). For every other problem please drop a message to the [Redis Google Group](http://groups.google.com/group/redis-db). We will be glad to help. diff --git a/topics/rdd-2.md b/topics/rdd-2.md index 4c85b50207..f1afc06bdf 100644 --- a/topics/rdd-2.md +++ b/topics/rdd-2.md @@ -14,7 +14,7 @@ without causing a backward compatibility issue even if the added meta data is not required in order to load data from the RDB file. For example thanks to the info fields specified in this document it will -be possible to add to RDB informations like file creation time, Redis version +be possible to add to RDB information like file creation time, Redis version generating the file, and any other useful information, in a way that not every field is required for an RDB version 7 file to be correctly processed. @@ -30,7 +30,7 @@ RDB file format must be increased so that previous versions of Redis will not attempt to load it. However currently the info fields are designed to only hold additional -informations that are not useful to load the dataset, but can better specify +information that are not useful to load the dataset, but can better specify how the RDB file was created. 
## Info fields representation diff --git a/topics/sentinel-clients.md b/topics/sentinel-clients.md index 1f56af7d69..659b3114da 100644 --- a/topics/sentinel-clients.md +++ b/topics/sentinel-clients.md @@ -15,7 +15,7 @@ This document is targeted at Redis clients developers that want to support Senti * Automatic configuration of clients via Sentinel. * Improved safety of Redis Sentinel automatic failover. -For details about how Redis Sentinel works, please check the [Redis Documentation](/topics/sentinel), as this document only contains informations needed for Redis client developers, and it is expected that readers are familiar with the way Redis Sentinel works. +For details about how Redis Sentinel works, please check the [Redis Documentation](/topics/sentinel), as this document only contains information needed for Redis client developers, and it is expected that readers are familiar with the way Redis Sentinel works. Redis service discovery via Sentinel === diff --git a/topics/sentinel-old.md b/topics/sentinel-old.md index 22913529c7..a9ce9b8971 100644 --- a/topics/sentinel-old.md +++ b/topics/sentinel-old.md @@ -257,7 +257,7 @@ and is only specified if the instance is not a master itself. * **failover-end-for-timeout** `` -- The failover terminated for timeout. If we are the failover leader, we sent a *best effort* `SLAVEOF` command to all the slaves yet to reconfigure. * **failover-end** `` -- The failover terminated with success. All the slaves appears to be reconfigured to replicate with the new master. * **switch-master** ` ` -- We are starting to monitor the new master, using the same name of the old one. The old master will be completely removed from our tables. -* **failover-abort-x-sdown** `` -- The failover was undoed (aborted) because the promoted slave appears to be in extended SDOWN state. +* **failover-abort-x-sdown** `` -- The failover was undone (aborted) because the promoted slave appears to be in extended SDOWN state. 
* **-slave-reconf-undo** `` -- The failover aborted so we sent a `SLAVEOF` command to the specified instance to reconfigure it back to the original master instance. * **+tilt** -- Tilt mode entered. * **-tilt** -- Tilt mode exited. @@ -285,7 +285,7 @@ it the **Subjective Leader**, and is selected using the following rule: * We remove all the Sentinels in SDOWN, disconnected, or with the last ping reply received more than `SENTINEL_INFO_VALIDITY_TIME` milliseconds ago (currently defined as 5 seconds). * Of all the remaining instances, we get the one with the lowest `runid`, lexicographically (every Redis instance has a Run ID, that is an identifier of every single execution). -For a Sentinel to sense to be the **Objective Leader**, that is, the Sentinel that should start the failove process, the following conditions are needed. +For a Sentinel to sense to be the **Objective Leader**, that is, the Sentinel that should start the failover process, the following conditions are needed. * It thinks it is the subjective leader itself. * It receives acknowledges from other Sentinels about the fact it is the leader: at least 50% plus one of all the Sentinels that were able to reply to the `SENTINEL is-master-down-by-addr` request should agree it is the leader, and additionally we need a total level of agreement at least equal to the configured quorum of the master instance that we are going to failover. diff --git a/topics/sentinel.md b/topics/sentinel.md index 8e1247b318..02d2e81176 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -90,7 +90,7 @@ following: You only need to specify the masters to monitor, giving to each separated master (that may have any number of slaves) a different name. There is no need to specify slaves, which are auto-discovered. 
Sentinel will update the -configuration automatically with additional informations about slaves (in +configuration automatically with additional information about slaves (in order to retain the information in case of restart). The configuration is also rewritten every time a slave is promoted to master during a failover. @@ -385,7 +385,7 @@ is in `ODOWN` state and the Sentinel received the authorization to failover from the majority of the Sentinel instances known, a suitable slave needs to be selected. -The slave selection process evaluates the following informations about slaves: +The slave selection process evaluates the following information about slaves: 1. Disconnection time from the master. 2. Slave priority. @@ -403,7 +403,7 @@ disconnected from the master for more than: (down-after-milliseconds * 10) + milliseconds_since_master_is_in_SDOWN_state -Is considered to be not reliable and is disregarded at all. +Is considered to be unreliable and is disregarded entirely. The slave selection only considers the slaves that passed the above test, and sorts it based on the above criteria, in the following order. @@ -596,7 +596,7 @@ and is only specified if the instance is not a master itself. * **+failover-state-select-slave** `` -- New failover state is `select-slave`: we are trying to find a suitable slave for promotion. * **no-good-slave** `` -- There is no good slave to promote. Currently we'll try after some time, but probably this will change and the state machine will abort the failover at all in this case. * **selected-slave** `` -- We found the specified good slave to promote. -* **failover-state-send-slaveof-noone** `` -- We are trynig to reconfigure the promoted slave as master, waiting for it to switch. +* **failover-state-send-slaveof-noone** `` -- We are trying to reconfigure the promoted slave as master, waiting for it to switch. 
* **failover-end-for-timeout** `` -- The failover terminated for timeout, slaves will eventually be configured to replicate with the new master anyway. * **failover-end** `` -- The failover terminated with success. All the slaves appears to be reconfigured to replicate with the new master. * **switch-master** ` ` -- The master new IP and address is the specified one after a configuration change. This is **the message most external users are interested in**. From 376d29c0f420291f34b03a37c8c0ef26d76576c7 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Fri, 27 Feb 2015 07:55:53 +0000 Subject: [PATCH 0149/2314] Remove the recommended flag for now --- clients.json | 1 - 1 file changed, 1 deletion(-) diff --git a/clients.json b/clients.json index 662012eafe..08697d4c19 100644 --- a/clients.json +++ b/clients.json @@ -678,7 +678,6 @@ "repository": "https://github.com/thunks/thunk-redis", "description": "A redis client with pipelining, rely on thunks, support promise.", "authors": ["zensh"], - "recommended": true, "active": true }, From 034b0637b7edfe22f5685c770dd079ed5fdf3a7c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Feb 2015 15:50:20 +0100 Subject: [PATCH 0150/2314] HSTRLEN documentation. 
--- commands.json | 16 ++++++++++++++++ commands/hstrlen.md | 14 ++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 commands/hstrlen.md diff --git a/commands.json b/commands.json index 98886e0708..6dd2623eeb 100644 --- a/commands.json +++ b/commands.json @@ -765,6 +765,22 @@ "since": "2.0.0", "group": "hash" }, + "HSTRLEN": { + "summary": "Get the length of the value of a hash field", + "complexity": "O(1)", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "field", + "type": "string" + } + ], + "since": "3.2.0", + "group": "hash" + }, "HVALS": { "summary": "Get all the values in a hash", "complexity": "O(N) where N is the size of the hash.", diff --git a/commands/hstrlen.md b/commands/hstrlen.md new file mode 100644 index 0000000000..b187f75fb7 --- /dev/null +++ b/commands/hstrlen.md @@ -0,0 +1,14 @@ +Returns the string length of the value associated with `field` in the hash stored at `key`. If the `key` or the `field` do not exist, 0 is returned. + +@return + +@integer-reply: the string length of the value associated with `field`, or zero when `field` is not present in the hash or `key` does not exist at all. 
+ +@examples + +```cli +HMSET myhash f1 HelloWorld f2 99 f3 -256 +HSTRLEN myhash f1 +HSTRLEN myhash f2 +HSTRLEN myhash f3 +``` From 00332f19771cd2300116673a6b09f4420fafcc37 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Fri, 27 Feb 2015 19:26:13 +0100 Subject: [PATCH 0151/2314] Add missing active/recommended flags to lettuce client forgotten in #492 --- clients.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 08697d4c19..ffeb5b1b80 100644 --- a/clients.json +++ b/clients.json @@ -828,7 +828,9 @@ "language": "Java", "repository": "https://github.com/mp911de/lettuce", "description": "Thread-safe client supporting async usage and key/value codecs", - "authors": ["ar3te", "mp911de"] + "authors": ["ar3te", "mp911de"], + "recommended": true, + "active": true }, { From da91a546fd3b9835a93f7afbd7da0188342a50b6 Mon Sep 17 00:00:00 2001 From: zensh Date: Sun, 1 Mar 2015 13:57:05 +0800 Subject: [PATCH 0152/2314] fix thunk-redis authors (https://twitter.com/izensh) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 08697d4c19..a5c0b1e7cc 100644 --- a/clients.json +++ b/clients.json @@ -677,7 +677,7 @@ "language": "Node.js", "repository": "https://github.com/thunks/thunk-redis", "description": "A redis client with pipelining, rely on thunks, support promise.", - "authors": ["zensh"], + "authors": ["izensh"], "active": true }, From 8cb39d2f95681846a2db94f4fba327013a9bc466 Mon Sep 17 00:00:00 2001 From: zsx Date: Wed, 4 Mar 2015 09:32:47 +0800 Subject: [PATCH 0153/2314] modify client.json --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 08697d4c19..19457df9cc 100644 --- a/clients.json +++ b/clients.json @@ -764,6 +764,16 @@ "authors": ["loopole"] }, + { + "name": "redis-client for C++" + "language": "C++", + "url": 
"https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", + "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", + "description": "one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", + "authors": [], + "active": true + }, + { "name": "redox", "language": "C++", From 476ca33833476fa5c3419e9ca280013599905665 Mon Sep 17 00:00:00 2001 From: zsx Date: Wed, 4 Mar 2015 09:37:51 +0800 Subject: [PATCH 0154/2314] add one powerful C++ redis client, support all redis client commands, including redis3.0 cluster --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 19457df9cc..b5dd78c3d4 100644 --- a/clients.json +++ b/clients.json @@ -769,7 +769,7 @@ "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", - "description": "one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", + "description": "full redis client commands, one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", "authors": [], "active": true }, From ba7f9cd1d9fb1e467026c6614f41421f3bd4de57 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Wed, 4 Mar 2015 07:29:48 +0000 Subject: [PATCH 0155/2314] Fix build by adding missing comma --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index b5dd78c3d4..b54064113c 100644 --- a/clients.json +++ b/clients.json @@ -765,7 +765,7 @@ }, { - "name": "redis-client for C++" + "name": "redis-client for C++", "language": "C++", "url": 
"https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", From 655a1c89fa26d566748e0df8497ec45eb648ad05 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 4 Mar 2015 21:50:02 +0100 Subject: [PATCH 0156/2314] Remove recommended flag from lettuce client --- clients.json | 1 - 1 file changed, 1 deletion(-) diff --git a/clients.json b/clients.json index ffeb5b1b80..ef046e854c 100644 --- a/clients.json +++ b/clients.json @@ -829,7 +829,6 @@ "repository": "https://github.com/mp911de/lettuce", "description": "Thread-safe client supporting async usage and key/value codecs", "authors": ["ar3te", "mp911de"], - "recommended": true, "active": true }, From 8d1387b190815d918be5e615b7424e71f9d8e6a6 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 5 Mar 2015 00:22:44 +0100 Subject: [PATCH 0157/2314] Add note about the count argument --- commands/spop.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/commands/spop.md b/commands/spop.md index cc1b11a490..928468de71 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -2,6 +2,8 @@ Removes and returns one or more random elements from the set value store at `key This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. +The `count` arugment will be available in 3.0 and is not available in 2.6 or 2.8 + @return @bulk-string-reply: the removed element, or `nil` when `key` does not exist. @@ -27,4 +29,4 @@ If count is bigger than the number of elements inside the Set, the command will ## Distribution of returned elements -Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms. 
\ No newline at end of file +Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms. From aad9ebad9e89ecfcb670599cb3eea11cbaeb48d8 Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Sat, 7 Mar 2015 20:46:05 -0500 Subject: [PATCH 0158/2314] Copyedits for typos --- commands/spop.md | 2 +- commands/zadd.md | 2 +- topics/admin.md | 2 +- topics/latency.md | 2 +- topics/sentinel-spec.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/commands/spop.md b/commands/spop.md index 928468de71..e55d41add4 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -2,7 +2,7 @@ Removes and returns one or more random elements from the set value store at `key This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. -The `count` arugment will be available in 3.0 and is not available in 2.6 or 2.8 +The `count` argument will be available in 3.0 and is not available in 2.6 or 2.8 @return diff --git a/commands/zadd.md b/commands/zadd.md index 22a08127fc..f6dab260e6 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -13,7 +13,7 @@ The score values should be the string representation of a double precision float Range of integer scores that can be expressed precisely --- -Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are prefectly representable. 
Larger integers, or fractions, are internally represented in exponential form, so it is possible that you get only an approximation of the decimal number, or of the very big integer, that you set as score. +Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are perfectly representable. Larger integers, or fractions, are internally represented in exponential form, so it is possible that you get only an approximation of the decimal number, or of the very big integer, that you set as score. Sorted sets 101 --- diff --git a/topics/admin.md b/topics/admin.md index b7657e711a..3ea8fa32d7 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -23,7 +23,7 @@ Running Redis on EC2 + Use HVM based instances, not PV based instances. + Don't use old instances families, for example: use m3.medium with HVM instead of m1.medium with PV. + The use of Redis persistence with **EC2 EBS volumes** needs to be handled with care since sometimes EBS volumes have high latency characteristics. -+ You may want to try the new **diskless replication** (currently experimetnal) if you have issues when slaves are synchronizing with the master. ++ You may want to try the new **diskless replication** (currently experimental) if you have issues when slaves are synchronizing with the master. Upgrading or restarting a Redis instance without downtime ------------------------------------------------------- diff --git a/topics/latency.md b/topics/latency.md index 03b9d102ca..f1476ec1de 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -247,7 +247,7 @@ using m3.medium (or better) instances will provide good results. 
* **Linux VM on EC2, new instance types (Xen)** 1GB RSS forked in 10 milliseconds (10 milliseconds per GB). * **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 milliseconds (424 milliseconds per GB). -As you can see certanin VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. +As you can see certain VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. Latency induced by transparent huge pages ----------------------------------------- diff --git a/topics/sentinel-spec.md b/topics/sentinel-spec.md index caeb7c03ab..d260562d9d 100644 --- a/topics/sentinel-spec.md +++ b/topics/sentinel-spec.md @@ -431,7 +431,7 @@ Setup examples Imaginary setup: computer A runs the Redis master. - computer B runs the Reids slave and the client software. + computer B runs the Redis slave and the client software. 
In this naive configuration it is possible to place a single sentinel, with "minimal agreement" set to the value of one (no acknowledge from other From 98446cb31b79fa8f089e33e14bc93617f061ed51 Mon Sep 17 00:00:00 2001 From: Tianfeng Date: Mon, 9 Mar 2015 14:04:26 +0800 Subject: [PATCH 0159/2314] Update clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 606c61403d..a085547916 100644 --- a/clients.json +++ b/clients.json @@ -442,6 +442,15 @@ "authors": ["jamescauwelier"], "active": true }, + + { + "name": "redis-async", + "language": "PHP", + "repository": "https://github.com/swoole/redis-async", + "description": "Asynchronous redis client library for PHP.", + "authors": ["matyhtf"], + "active": true + }, { "name": "Yampee Redis", From ccd027cbe95bc66f84fe44a6f00cc1c2d2ad265d Mon Sep 17 00:00:00 2001 From: Stephen McDonald Date: Tue, 10 Mar 2015 07:20:48 +1100 Subject: [PATCH 0160/2314] Fixed error in ZRANGE example. --- commands/zrange.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/zrange.md b/commands/zrange.md index a5354318fd..f1a842e610 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -43,7 +43,7 @@ ZRANGE myzset 2 3 ZRANGE myzset -2 -1 ``` -The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_2*, *element_2*, *score_2*, ..., *element_N*, *score_N*. +The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_1*, *element_2*, *score_2*, ..., *element_N*, *score_N*. ```cli ZRANGE myzset 0 1 WITHSCORES From ee1306dc559f8ae505312219084ce96dd3b4395d Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 14:50:44 +0100 Subject: [PATCH 0161/2314] Note about SPOP optional count in Redis 3.2. 
--- commands/spop.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/commands/spop.md b/commands/spop.md index cc1b11a490..92c81fd796 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -20,11 +20,14 @@ SPOP myset 3 SMEMBERS myset ``` - ## Specification of the behavior when count is passed If count is bigger than the number of elements inside the Set, the command will only return the whole set without additional elements. ## Distribution of returned elements -Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms. \ No newline at end of file +Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms. + +## Count argument extension + +Redis 3.2 will be the first version where an optional `count` argument can be passed to `SPOP` in order to retrieve multiple elements in a single call. The implementation is already available in the `unstable` branch. From 24d0d0943e2b86ab7eed3968787c37d381c78af1 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 14:52:05 +0100 Subject: [PATCH 0162/2314] Move CLUSTER SLOTS to cluster group. 
--- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index c6145b526e..3cd2e351ad 100644 --- a/commands.json +++ b/commands.json @@ -231,7 +231,7 @@ "summary": "Get array of Cluster slot to node mappings", "complexity": "O(N) where N is the total number of Cluster nodes", "since": "3.0.0", - "group": "server" + "group": "cluster" }, "COMMAND": { "summary": "Get array of Redis command details", From 352977b5c437b79a25b316d54271d2d43e545bad Mon Sep 17 00:00:00 2001 From: zsx Date: Wed, 4 Mar 2015 09:32:47 +0800 Subject: [PATCH 0163/2314] modify client.json --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 08697d4c19..19457df9cc 100644 --- a/clients.json +++ b/clients.json @@ -764,6 +764,16 @@ "authors": ["loopole"] }, + { + "name": "redis-client for C++" + "language": "C++", + "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", + "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", + "description": "one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", + "authors": [], + "active": true + }, + { "name": "redox", "language": "C++", From f9933d35a6e141525fd12475d6259a9b11962595 Mon Sep 17 00:00:00 2001 From: zsx Date: Wed, 4 Mar 2015 09:37:51 +0800 Subject: [PATCH 0164/2314] add one powerful C++ redis client, support all redis client commands, including redis3.0 cluster --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 19457df9cc..b5dd78c3d4 100644 --- a/clients.json +++ b/clients.json @@ -769,7 +769,7 @@ "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "repository": 
"https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", - "description": "one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", + "description": "full redis client commands, one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", "authors": [], "active": true }, From 1d327b8977fcea6f56f479dc16c56abd5ee05f46 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Wed, 4 Mar 2015 07:29:48 +0000 Subject: [PATCH 0165/2314] Fix build by adding missing comma --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index b5dd78c3d4..b54064113c 100644 --- a/clients.json +++ b/clients.json @@ -765,7 +765,7 @@ }, { - "name": "redis-client for C++" + "name": "redis-client for C++", "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", From d0316cee738d467bbacd2a452da07a2037cf7b1b Mon Sep 17 00:00:00 2001 From: zensh Date: Sun, 1 Mar 2015 13:57:05 +0800 Subject: [PATCH 0166/2314] fix thunk-redis authors (https://twitter.com/izensh) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index b54064113c..606c61403d 100644 --- a/clients.json +++ b/clients.json @@ -677,7 +677,7 @@ "language": "Node.js", "repository": "https://github.com/thunks/thunk-redis", "description": "A redis client with pipelining, rely on thunks, support promise.", - "authors": ["zensh"], + "authors": ["izensh"], "active": true }, From 9cbec9812b06ef00f94b2c98bc91ea0520ae8228 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 5 Mar 2015 00:22:44 +0100 Subject: [PATCH 0167/2314] Add note about the 
count argument --- commands/spop.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/spop.md b/commands/spop.md index 92c81fd796..9a299a700e 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -2,6 +2,8 @@ Removes and returns one or more random elements from the set value store at `key This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. +The `count` arugment will be available in 3.0 and is not available in 2.6 or 2.8 + @return @bulk-string-reply: the removed element, or `nil` when `key` does not exist. From 4de614f39ed79fc60e2da68f08bbdf8c9210745e Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Sat, 7 Mar 2015 20:46:05 -0500 Subject: [PATCH 0168/2314] Copyedits for typos --- commands/spop.md | 2 +- commands/zadd.md | 2 +- topics/admin.md | 2 +- topics/latency.md | 2 +- topics/sentinel-spec.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/commands/spop.md b/commands/spop.md index 9a299a700e..98023611be 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -2,7 +2,7 @@ Removes and returns one or more random elements from the set value store at `key This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. -The `count` arugment will be available in 3.0 and is not available in 2.6 or 2.8 +The `count` argument will be available in 3.0 and is not available in 2.6 or 2.8 @return diff --git a/commands/zadd.md b/commands/zadd.md index 22a08127fc..f6dab260e6 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -13,7 +13,7 @@ The score values should be the string representation of a double precision float Range of integer scores that can be expressed precisely --- -Redis sorted sets use a *double 64-bit floating point number* to represent the score. 
In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are prefectly representable. Larger integers, or fractions, are internally represented in exponential form, so it is possible that you get only an approximation of the decimal number, or of the very big integer, that you set as score. +Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are perfectly representable. Larger integers, or fractions, are internally represented in exponential form, so it is possible that you get only an approximation of the decimal number, or of the very big integer, that you set as score. Sorted sets 101 --- diff --git a/topics/admin.md b/topics/admin.md index b7657e711a..3ea8fa32d7 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -23,7 +23,7 @@ Running Redis on EC2 + Use HVM based instances, not PV based instances. + Don't use old instances families, for example: use m3.medium with HVM instead of m1.medium with PV. + The use of Redis persistence with **EC2 EBS volumes** needs to be handled with care since sometimes EBS volumes have high latency characteristics. -+ You may want to try the new **diskless replication** (currently experimetnal) if you have issues when slaves are synchronizing with the master. ++ You may want to try the new **diskless replication** (currently experimental) if you have issues when slaves are synchronizing with the master. 
Upgrading or restarting a Redis instance without downtime ------------------------------------------------------- diff --git a/topics/latency.md b/topics/latency.md index 03b9d102ca..f1476ec1de 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -247,7 +247,7 @@ using m3.medium (or better) instances will provide good results. * **Linux VM on EC2, new instance types (Xen)** 1GB RSS forked in 10 milliseconds (10 milliseconds per GB). * **Linux VM on Linode (Xen)** 0.9GBRSS forked into 382 milliseconds (424 milliseconds per GB). -As you can see certanin VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. +As you can see certain VM running on Xen have a performance hit that is between one order to two orders of magnitude. For EC2 users the suggestion is simple: use modern HVM based instances. Latency induced by transparent huge pages ----------------------------------------- diff --git a/topics/sentinel-spec.md b/topics/sentinel-spec.md index caeb7c03ab..d260562d9d 100644 --- a/topics/sentinel-spec.md +++ b/topics/sentinel-spec.md @@ -431,7 +431,7 @@ Setup examples Imaginary setup: computer A runs the Redis master. - computer B runs the Reids slave and the client software. + computer B runs the Redis slave and the client software. 
In this naive configuration it is possible to place a single sentinel, with "minimal agreement" set to the value of one (no acknowledge from other From 8390fadf51bc48b88b0a0b88d2b4060484071b78 Mon Sep 17 00:00:00 2001 From: Tianfeng Date: Mon, 9 Mar 2015 14:04:26 +0800 Subject: [PATCH 0169/2314] Update clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 606c61403d..a085547916 100644 --- a/clients.json +++ b/clients.json @@ -442,6 +442,15 @@ "authors": ["jamescauwelier"], "active": true }, + + { + "name": "redis-async", + "language": "PHP", + "repository": "https://github.com/swoole/redis-async", + "description": "Asynchronous redis client library for PHP.", + "authors": ["matyhtf"], + "active": true + }, { "name": "Yampee Redis", From 885113b62fc2dd216c289f10a72cf86dadcb35ca Mon Sep 17 00:00:00 2001 From: Stephen McDonald Date: Tue, 10 Mar 2015 07:20:48 +1100 Subject: [PATCH 0170/2314] Fixed error in ZRANGE example. --- commands/zrange.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/zrange.md b/commands/zrange.md index a5354318fd..f1a842e610 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -43,7 +43,7 @@ ZRANGE myzset 2 3 ZRANGE myzset -2 -1 ``` -The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_2*, *element_2*, *score_2*, ..., *element_N*, *score_N*. +The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_1*, *element_2*, *score_2*, ..., *element_N*, *score_N*. ```cli ZRANGE myzset 0 1 WITHSCORES From ee0adaaa47dc654197b2c6a3b65dafd30295dd91 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 15:22:46 +0100 Subject: [PATCH 0171/2314] CLUSTER SLOTS, SPOP, doc improved. CLUSTER MEET added. 
--- commands.json | 6 ++++++ commands/cluster meet.md | 36 ++++++++++++++++++++++++++++++++++++ commands/cluster slots.md | 15 ++++++++++----- 3 files changed, 52 insertions(+), 5 deletions(-) create mode 100644 commands/cluster meet.md diff --git a/commands.json b/commands.json index 3cd2e351ad..bf7c7d1d9d 100644 --- a/commands.json +++ b/commands.json @@ -227,6 +227,12 @@ ], "group": "server" }, + "CLUSTER MEET": { + "summary": "Force a node cluster to handshake with another node", + "complexity": "O(1)", + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SLOTS": { "summary": "Get array of Cluster slot to node mappings", "complexity": "O(N) where N is the total number of Cluster nodes", diff --git a/commands/cluster meet.md b/commands/cluster meet.md new file mode 100644 index 0000000000..8adca9c5fe --- /dev/null +++ b/commands/cluster meet.md @@ -0,0 +1,36 @@ +`CLUSTER MEET` is used in order to connect different Redis nodes with cluster +support enabled, into a working cluster. + +The basic idea is that nodes by default don't trust each other, and are +considered unknown, so that it is unlikely that different cluster nodes will +mix into a single one because of system administration errors or network +addresses modifications. + +So in order for a given node to accept another one into the list of nodes +composing a Redis Cluster, there are only two ways: + +1. The system administrator sends a `CLUSTER MEET` command to to force a node to meet another one. +2. An already known node sends a list of nodes in the gossip section that we are not aware of. If the receiving node trusts the sending node as a known node, it will process the gossip section and send an handshake to the nodes that are still not known. + +Note that Redis Cluster forms a full mesh, but it is not needed to send as much as `CLUSTER MEET` commands as needed to form the full mesh, because thanks to gossiping the missing links will be created. 
+ +For example if we imagine a cluster formed of the following four nodes called A, B, C and D, we may send just the following set of commands to A: + +1. CLUSTER MEET B-ip B-port +2. CLUSTER MEET C-ip C-port +3. CLUSTER MEET D-ip D-port + +As a side effect of `A` knowing and being known by all the other nodes, it will send gossip sections in the heartbeat packets that will allow each other node to create a link with each other one, forming a full mesh in a matter of seconds, even if the cluster is large. + +## Implementation details: MEET and PING packets + +When a given node receives a `CLUSTER MEET` message, the node specified in the +command still does not know the node we sent the command to. So in order for +the node to force the receiver to accept it as a trusted node, it sends a +`MEET` packet instead of a `PING` packet. The two packets have exactly the +same format, but the former forces the receiver to acknowledge the node as +trusted. + +@return + +@simple-string-reply: `OK` if the command was successful. If the address or port specified are invalid an error is returned. diff --git a/commands/cluster slots.md b/commands/cluster slots.md index e4293fa63c..daa71933d1 100644 --- a/commands/cluster slots.md +++ b/commands/cluster slots.md @@ -1,7 +1,10 @@ -Returns @array-reply of current cluster state. - `CLUSTER SLOTS` returns details about which cluster slots map to which -Redis instances. +Redis instances. The command is suitable to be used by Redis Cluster client +libraries implementations in order to retrieve (or update when a redirection +is received) the map associating cluster *hash slots* with actual nodes +network coordinates (composed of an IP address and a TCP port), so that when +a command is received, it can be sent to what is likely the right instance +for the keys specified in the command. ## Nested Result Array Each nested result is: @@ -25,6 +28,10 @@ If a cluster instance has non-contiguous slots (e.g. 
1-400,900,1800-6000) then master and replica IP/Port results will be duplicated for each top-level slot range reply. +@return + +@array-reply: nested list of slot ranges with IP/Port mappings. + ### Sample Output ``` 127.0.0.1:7001> cluster slots @@ -54,6 +61,4 @@ slot range reply. 2) (integer) 7006 ``` -@return -@array-reply: nested list of slot ranges with IP/Port mappings. From b3907fd61d5089eb007b3507eb8642fa00bfb8c5 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 15:44:01 +0100 Subject: [PATCH 0172/2314] CLUSTER MEET typo fixed. --- commands/cluster meet.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster meet.md b/commands/cluster meet.md index 8adca9c5fe..4ee1c04531 100644 --- a/commands/cluster meet.md +++ b/commands/cluster meet.md @@ -9,7 +9,7 @@ addresses modifications. So in order for a given node to accept another one into the list of nodes composing a Redis Cluster, there are only two ways: -1. The system administrator sends a `CLUSTER MEET` command to to force a node to meet another one. +1. The system administrator sends a `CLUSTER MEET` command to force a node to meet another one. 2. An already known node sends a list of nodes in the gossip section that we are not aware of. If the receiving node trusts the sending node as a known node, it will process the gossip section and send an handshake to the nodes that are still not known. Note that Redis Cluster forms a full mesh, but it is not needed to send as much as `CLUSTER MEET` commands as needed to form the full mesh, because thanks to gossiping the missing links will be created. From e6a40d6ce9689af47346093538c1cd5519e78b60 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 15:46:35 +0100 Subject: [PATCH 0173/2314] commands.json: CLUSTER MEET args added. 
--- commands.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/commands.json b/commands.json index bf7c7d1d9d..4b7b4ef705 100644 --- a/commands.json +++ b/commands.json @@ -230,6 +230,16 @@ "CLUSTER MEET": { "summary": "Force a node cluster to handshake with another node", "complexity": "O(1)", + "arguments": [ + { + "name": "ip", + "type": "string" + }, + { + "name": "port", + "type": "integer" + } + ], "since": "3.0.0", "group": "cluster" }, From 01381c1218f0319ce505d1a7b9b88a7a0db69405 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 15:53:06 +0100 Subject: [PATCH 0174/2314] Clearer CLUSTER MEET description, hopefully. --- commands/cluster meet.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/commands/cluster meet.md b/commands/cluster meet.md index 4ee1c04531..aaf574ef6e 100644 --- a/commands/cluster meet.md +++ b/commands/cluster meet.md @@ -12,9 +12,11 @@ composing a Redis Cluster, there are only two ways: 1. The system administrator sends a `CLUSTER MEET` command to force a node to meet another one. 2. An already known node sends a list of nodes in the gossip section that we are not aware of. If the receiving node trusts the sending node as a known node, it will process the gossip section and send an handshake to the nodes that are still not known. -Note that Redis Cluster forms a full mesh, but it is not needed to send as much as `CLUSTER MEET` commands as needed to form the full mesh, because thanks to gossiping the missing links will be created. +Note that Redis Cluster needs to form a full mesh (each node is connected with each other node), but in order to create a cluster, there is no need to send all the `CLUSTER MEET` commands needed to form the full mesh. What matter is to send enough `CLUSTER MEET` messages so that each node can reach each other node through a *chain of known nodes*. Thanks to the exchange of gossip informations in heartbeat packets, the missing links will be created. 
-For example if we imagine a cluster formed of the following four nodes called A, B, C and D, we may send just the following set of commands to A: +So, if we link node A with node B via `CLUSTER MEET`, and B with C, A and C will find their ways to handshake and create a link. + +Another example: if we imagine a cluster formed of the following four nodes called A, B, C and D, we may send just the following set of commands to A: 1. CLUSTER MEET B-ip B-port 2. CLUSTER MEET C-ip C-port @@ -22,6 +24,8 @@ For example if we imagine a cluster formed of the following four nodes called A, As a side effect of `A` knowing and being known by all the other nodes, it will send gossip sections in the heartbeat packets that will allow each other node to create a link with each other one, forming a full mesh in a matter of seconds, even if the cluster is large. +Moreover `CLUSTER MEET` does not need to be reciprocal. If I send the command to A in order to join B, I don't need to also send it to B in order to join A. + ## Implementation details: MEET and PING packets When a given node receives a `CLUSTER MEET` message, the node specified in the From 98fcfee74651f2bee4aa1aecb955d7be8bb49487 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 12 Mar 2015 18:04:15 +0100 Subject: [PATCH 0175/2314] Redis Cluster tutorial updated. --- topics/cluster-tutorial.md | 143 +++++++++++++++++++++++++++---------- 1 file changed, 105 insertions(+), 38 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index e0fd21ce3b..739af18f14 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -8,14 +8,16 @@ going into the details that are covered in the [Redis Cluster specification](/topics/cluster-spec) but just describing how the system behaves from the point of view of the user. -Note this tutorial requires Redis version 3.0 or higher. 
+However this tutorial tries to provide informations about the availability +and consistency characteristics of Redis Cluster from the point of view +of the final user, state in a simple to understand way. -Note that if you plan to run a serious Redis Cluster deployment, the -more formal specification is a highly suggested reading. +Note this tutorial requires Redis version 3.0 or higher. -**Redis cluster is currently alpha quality code**, please get in touch in the -Redis mailing list or open an issue in the Redis GitHub repository if you -find any issue. +If you plan to run a serious Redis Cluster deployment, the +more formal specification is a suggested reading, even if not +strictly required. However it is a good idea to start from this document, +play with Redis Cluster some time, and only later read the specification. Redis Cluster 101 --- @@ -23,19 +25,16 @@ Redis Cluster 101 Redis Cluster provides a way to run a Redis installation where data is **automatically sharded across multiple Redis nodes**. -Commands dealing with multiple keys are not supported by the cluster, because -this would require moving data between Redis nodes, making Redis Cluster -not able to provide Redis-alike performances and predictable behavior -under load. - Redis Cluster also provides **some degree of availability during partitions**, that is in practical terms the ability to continue the operations when -some nodes fail or are not able to communicate. +some nodes fail or are not able to communicate. However the cluster stops +to operate in the event of larger failures (for example when the majority of +masters are unavailable). So in practical terms, what you get with Redis Cluster? -* The ability to automatically split your dataset among multiple nodes. -* The ability to continue operations when a subset of the nodes are experiencing failures or are unable to communicate with the rest of the cluster. +* The ability to **automatically split your dataset among multiple nodes**. 
+* The ability to **continue operations when a subset of the nodes are experiencing failures** or are unable to communicate with the rest of the cluster. Redis Cluster TCP ports --- @@ -61,6 +60,10 @@ Note that for a Redis Cluster to work properly you need, for each node: If you don't open both TCP ports, your cluster will not work as expected. +The cluster bus uses a different, binary protocol, for node to node data +exchange, which is more suited to exchange information between nodes using +little bandwidth and processing time. + Redis Cluster data sharding --- @@ -88,13 +91,24 @@ Because moving hash slots from a node to another does not require to stop operations, adding and removing nodes, or changing the percentage of hash slots hold by nodes, does not require any downtime. +Redis Cluster supports multiple key operations as long as all the keys involved +into a single command execution (or whole transaction, or Lua script +execution) all belong to the same hash slot. The user can force multiple keys +to be part of the same hash slot by using a concept called *hash tags*. + +Hash tags are documented in the Redis Cluster specification, but the gist is +that if there is a substring between {} brackets in a key, only what is +inside the string is hashed, so fo example `this{foo}key` and `another{foo}key` +are guaranteed to be in the same hash slot, and can be used together in a +command with multiple keys as arguments. + Redis Cluster master-slave model --- -In order to remain available when a subset of nodes are failing or are not able -to communicate with the majority of nodes, Redis Cluster uses a master-slave -model where every node has from 1 (the master itself) to N replicas (N-1 -additional slaves). 
+In order to remain available when a subset of master nodes are failing or are +not able to communicate with the majority of nodes, Redis Cluster uses a +master-slave model where every hash slot has from 1 (the master itself) to N +replicas (N-1 additional slaves nodes). In our example cluster with nodes A, B, C, if node B fails the cluster is not able to continue, since we no longer have a way to serve hash slots in the @@ -102,11 +116,11 @@ range 5501-11000. However if when the cluster is created (or at a latter time) we add a slave node to every master, so that the final cluster is composed of A, B, C -that are masters, and A1, B1, C1 that are slaves, the system is able to -continue if node B fails. +that are masters nodes, and A1, B1, C1 that are slaves nodes, the system is +able to continue if node B fails. -Node B1 replicates B, so the cluster will elect node B1 as the new master -and will continue to operate correctly. +Node B1 replicates B, and B fails, the cluster will promote node B1 as the new +master and will continue to operate correctly. However note that if nodes B and B1 fail at the same time Redis Cluster is not able to continue to operate. @@ -116,7 +130,7 @@ Redis Cluster consistency guarantees Redis Cluster is not able to guarantee **strong consistency**. In practical terms this means that under certain conditions it is possible that Redis -Cluster will forget a write that was acknowledged by the system. +Cluster will lose writes that were acknowledged by the system to the client. The first reason why Redis Cluster can lose writes is because it uses asynchronous replication. 
This means that during writes the following @@ -130,7 +144,8 @@ As you can see B does not wait for an acknowledge from B1, B2, B3 before replying to the client, since this would be a prohibitive latency penalty for Redis, so if your client writes something, B acknowledges the write, but crashes before being able to send the write to its slaves, one of the -slaves can be promoted to master losing the write forever. +slaves (that did not received the write) can be promoted to master, losing +the write forever. This is **very similar to what happens** with most databases that are configured to flush data to disk every second, so it is a scenario you @@ -138,16 +153,21 @@ are already able to reason about because of past experiences with traditional database systems not involving distributed systems. Similarly you can improve consistency by forcing the database to flush data on disk before replying to the client, but this usually results into prohibitively low -performances. +performances. That would be the equivalent of synchronous replication in +the case of Redis Cluster. Basically there is a trade-off to take between performances and consistency. -Note: Redis Cluster in the future will allow users to perform synchronous -writes when absolutely needed. +Redis Cluster has support for synchronous writes when absolutely needed, +implemented via the `WAIT` command, this makes losing writes a lot less +likely, however note that Redis Cluster does not implement strong consistency +even when synchronous replication is used: it is always possible under more +complex failure scenarios that a slave that was not able to receive the write +is elected as master. -There is another scenario where Redis Cluster will lose writes, that happens -during a network partition where a client is isolated with a minority of -instances including at least a master. 
+There is another notable scenario where Redis Cluster will lose writes, that +happens during a network partition where a client is isolated with a minority +of instances including at least a master. Take as an example our 6 nodes cluster composed of A, B, C, A1, B1, C1, with 3 masters and 3 slaves. There is also a client, that we will call Z1. @@ -161,7 +181,7 @@ However if the partition lasts enough time for B1 to be promoted to master in the majority side of the partition, the writes that Z1 is sending to B will be lost. -Note that there is a maximum window to the amount of writes Z1 will be able +Note that there is a **maximum window** to the amount of writes Z1 will be able to send to B: if enough time has elapsed for the majority side of the partition to elect a slave as master, every master node in the minority side stops accepting writes. @@ -178,6 +198,10 @@ and stops accepting writes. Creating and using a Redis Cluster === +Note: to deploy a Redis Cluster manually is **very important to learn** certain +operation aspects of it. However if you want to get a cluster up and running +ASAP skip this section and the next one and go directly to **Creating a Redis Cluster using the create-custer script**. + To create a cluster, the first thing we need is to have a few empty Redis instances running in **cluster mode**. This basically means that clusters are not created using normal Redis instances, but a special mode @@ -275,6 +299,33 @@ you'll see a message like that: This means that there is at least a master instance serving each of the 16384 slots available. +Creating a Redis Cluster using the create-custer script +--- + +If you don't want to create a Redis Cluster by configuring and executing +individual instances manually as explained above, there is a much simpler +system (but you'll not learn the same amount of operational details). + +Just check `utils/create-cluster` directory in the Redis distribution. 
+There is a script called `create-cluster` inside (same name as the directory +it is contained into), it's a simple bash script. In order to start +a 6 nodes cluster with 3 masters and 3 slaves just type the following +commands: + +1. `create-cluster start` +2. `create-cluster create` + +Reply to `yes` in step 2 when the `redis-trib` utility wants you to accept +the cluster layout. + +You can now interact with the cluster, the first node will start at port 30000 +by default. When you are done, stop the cluster with: + +3. `create-cluster stop`. + +Please read the `README` inside this directory for more information on how +to run the script. + Playing with the cluster --- @@ -310,6 +361,9 @@ redis 127.0.0.1:7000> get hello "world" ``` +**Note:** if you craeted the cluster using the script your nodes may listen +to different ports, starting from 30000 by default. + The redis-cli cluster support is very basic so it always uses the fact that Redis Cluster nodes are able to redirect a client to the right node. A serious client is able to do better than that, and cache the map between @@ -497,26 +551,39 @@ the following command: All the slots will be covered as usually, but this time the master at 127.0.0.1:7000 will have more hash slots, something around 6461. +Scripting a resharding operation +--- + +Reshardings can be performed automatically without the need to manually +enter the parameters in an interactive way. This is possible using a command +line like the following: + + ./redis-trib.rb reshard : --from --to --slots --yes + +This allows to build some automatism if you are likely to reshard often +however currently, there is no way for `redis-trib` to automatically +rebalance the cluster checking the distribution of keys across the cluster +nodes and intelligently moving slots as needed. This feature will be added +in the future. + A more interesting example application --- -So far so good, but the example application we used is not very good. 
-It writes simply to the cluster without ever checking if what was +The example application we wrote early is not very good. +It writes to the cluster in a simple way without even checking if what was written is the right thing. From our point of view the cluster receiving the writes could just always write the key `foo` to `42` to every operation, and we would not notice at all. -So in the reids-rb-cluster repository, there is a more interesting application -that is called `consistency-test.rb`. It is a much more interesting application -as it uses a set of counters, by default 1000, and sends `INCR` commands -in order to increment the counters. +So in the `redis-rb-cluster` repository, there is a more interesting application +that is called `consistency-test.rb`. It uses a set of counters, by default 1000, and sends `INCR` commands in order to increment the counters. However instead of just writing, the application does two additional things: * When a counter is updated using `INCR`, the application remembers the write. -* It also reads a random counter before every write, and check if the value is what it expected it to be, comparing it with the value it has in memory. +* It also reads a random counter before every write, and check if the value is what we expected it to be, comparing it with the value it has in memory. What this means is that this application is a simple **consistency checker**, and is able to tell you if the cluster lost some write, or if it accepted From 9939d28ba8edff80506abf1808c32e24a07d3f11 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 10:14:48 +0100 Subject: [PATCH 0176/2314] Cluster tutorial: wrong port number fixed. 
--- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 739af18f14..0a4ed7f00d 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -318,7 +318,7 @@ commands: Reply to `yes` in step 2 when the `redis-trib` utility wants you to accept the cluster layout. -You can now interact with the cluster, the first node will start at port 30000 +You can now interact with the cluster, the first node will start at port 30001 by default. When you are done, stop the cluster with: 3. `create-cluster stop`. @@ -362,7 +362,7 @@ redis 127.0.0.1:7000> get hello ``` **Note:** if you craeted the cluster using the script your nodes may listen -to different ports, starting from 30000 by default. +to different ports, starting from 30001 by default. The redis-cli cluster support is very basic so it always uses the fact that Redis Cluster nodes are able to redirect a client to the right node. From f5470e93cce0de04a0f4e8e85e41a2818b2f7de1 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 10:40:50 +0100 Subject: [PATCH 0177/2314] CLUSTER NODES documented. 
--- commands.json | 6 ++++ commands/cluster nodes.md | 59 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 commands/cluster nodes.md diff --git a/commands.json b/commands.json index 4b7b4ef705..d532a427c0 100644 --- a/commands.json +++ b/commands.json @@ -243,6 +243,12 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER NODES": { + "summary": "Get Cluster config for the node", + "complexity": "O(N) where N is the total number of Cluster nodes", + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SLOTS": { "summary": "Get array of Cluster slot to node mappings", "complexity": "O(N) where N is the total number of Cluster nodes", diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md new file mode 100644 index 0000000000..44cb52fec0 --- /dev/null +++ b/commands/cluster nodes.md @@ -0,0 +1,59 @@ +Each node in a Redis Cluster has its view of the current cluster configuration, +given by the set of known nodes, the state of the connection we have with such +nodes, their flags, properties and assigned slots, and so forth. + +`CLUSTER NODES` provides all this information, that is, the current cluster +configuration of the node we are contacting, in a serialization format which +happens to be exactly the same as the one used by Redis Cluster itself in +order to store on disk the cluster state (however the on disk cluster state +has a few additional info appended at the end). + +Note that normally clients willing to fetch the map between Cluster +hash slots and node addresses should use `CLUSTER SLOTS` instead. +`CLUSTER NODES`, that provides more information, should be used for +administrative tasks, debugging, and configuration inspections. +It is also used by `redis-trib` in order to manage a cluster. + +## Serialization format + +The output of the command is just a space-separated CSV string, where +each line represents a node in the cluster. 
The following is an example +of output: + + 07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected + 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 + 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 + 6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected + 824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected + e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 + +Each line is composed of the following fields: + + ... + +The meaning of each filed is the following: + +1. **id** The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). +2. **ip:port** The node address where clients should contact the node to run queries. +3. **flags** A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in detail in the next section. +4. **master** If the node is a slave, and the master is known, the master node ID, oterwise the "-" character. +5. **ping-sent** Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. +6. **pong-recv** Milliseconds unix time the last pong was received. +7. **config-epoch** The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoc wins. +8. **link-state** The state of the link used for the node-to-node cluster bus. 
We use this link to communicate with the node. Can be `connected` or `disconnected`. +9. **slot** An hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. + +Meaning of the flags (field number 3): + +* `myself` the node you are contacting. +* `master` node is a master. +* `slave` node is a slave. +* `fail?` node is in PFAIL state. Not reachable for the node you are contacting, but still logically reachable (not in FAIL state). +* `fail` node is in FAIL state. It was not reachable for multiple nodes that promoted the PFAIL state to FAIL. +* `handshake` untrusted node, we are handshaking. +* `noaddr` No address known for this node. +* `noflags` No flags at all. + +@return + +@bulk-string-reply: The serialized cluster configuration. From e14c127d926e30461f705bce825f765c47a4cf03 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 10:44:08 +0100 Subject: [PATCH 0178/2314] CLUSTER NODES markdown fixes. --- commands/cluster nodes.md | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 44cb52fec0..49113e5c0f 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -18,18 +18,24 @@ It is also used by `redis-trib` in order to manage a cluster. The output of the command is just a space-separated CSV string, where each line represents a node in the cluster. 
The following is an example -of output: +of output (there are no blank newlines between output lines in the real +output): - 07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected - 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 - 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 - 6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected - 824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected - e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 +07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected + +67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 + +292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 + +6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected + +824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected + +e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 Each line is composed of the following fields: - ... +`` `` `` `` `` `` `` `` `` `` `...` `` The meaning of each filed is the following: From 0d94733868974d3f8f3413c5fc1e4bc3fdb329c1 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 10:46:17 +0100 Subject: [PATCH 0179/2314] More layout fixes for CLUSTER NODES page. 
--- commands/cluster nodes.md | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 49113e5c0f..470d1d8e9b 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -18,20 +18,14 @@ It is also used by `redis-trib` in order to manage a cluster. The output of the command is just a space-separated CSV string, where each line represents a node in the cluster. The following is an example -of output (there are no blank newlines between output lines in the real -output): - -07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected - -67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 - -292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 - -6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected - -824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected - -e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 +of output: + +* 07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected +* 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 +* 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 +* 6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected +* 824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected +* e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 
myself,master - 0 0 1 connected 0-5460 Each line is composed of the following fields: From 70781aefc48aea24eaaad9b87ff3f0eb2ea2c57f Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 10:52:33 +0100 Subject: [PATCH 0180/2314] Clearer flags doc in CLUSTER NODE. --- commands/cluster nodes.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 470d1d8e9b..758bd57533 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -45,14 +45,14 @@ The meaning of each filed is the following: Meaning of the flags (field number 3): -* `myself` the node you are contacting. -* `master` node is a master. -* `slave` node is a slave. -* `fail?` node is in PFAIL state. Not reachable for the node you are contacting, but still logically reachable (not in FAIL state). -* `fail` node is in FAIL state. It was not reachable for multiple nodes that promoted the PFAIL state to FAIL. -* `handshake` untrusted node, we are handshaking. -* `noaddr` No address known for this node. -* `noflags` No flags at all. +* **myself** The node you are contacting. +* **master** Node is a master. +* **slave** Node is a slave. +* **fail?** Node is in PFAIL state. Not reachable for the node you are contacting, but still logically reachable (not in FAIL state). +* **fail** Node is in FAIL state. It was not reachable for multiple nodes that promoted the PFAIL state to FAIL. +* **handshake** Untrusted node, we are handshaking. +* **noaddr** No address known for this node. +* **noflags** No flags at all. 
@return From d61b561236cf0bfbd317456e2a96643266785233 Mon Sep 17 00:00:00 2001 From: zensh Date: Mon, 9 Mar 2015 23:51:44 +0800 Subject: [PATCH 0181/2314] update thunk-redis --- clients.json | 2 +- topics/cluster-tutorial.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index a085547916..ebf9edf6cb 100644 --- a/clients.json +++ b/clients.json @@ -685,7 +685,7 @@ "name": "thunk-redis", "language": "Node.js", "repository": "https://github.com/thunks/thunk-redis", - "description": "A redis client with pipelining, rely on thunks, support promise.", + "description": "A thunk/promise-based redis client with pipelining and cluster.", "authors": ["izensh"], "active": true }, diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 0a4ed7f00d..2f4bc52d51 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -339,6 +339,7 @@ I'm aware of the following implementations: * The popular [Predis](https://github.com/nrk/predis) has support for Redis Cluster, the support was recently updated and is in active development. * The most used Java client, [Jedis](https://github.com/xetorthio/jedis) recently added support for Redis Cluster, see the *Jedis Cluster* section in the project README. * [StackExchange.Redis](https://github.com/StackExchange/StackExchange.Redis) offers support for C# (and should work fine with most .NET languages; VB, F#, etc) +* [thunk-redis](https://github.com/thunks/thunk-redis) offers support for Node.js and io.js, it is a thunk/promise-based redis client with pipelining and cluster. * The `redis-cli` utility in the unstable branch of the Redis repository at GitHub implements a very basic cluster support when started with the `-c` switch. 
An easy way to test Redis Cluster is either to try and of the above clients From d594ffa87d4ea4b82337639255e49036f7ba71c3 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 18:14:00 +0100 Subject: [PATCH 0182/2314] CLUSTER NODES: info about migrating/importing slots. --- commands/cluster nodes.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 758bd57533..5c31209a30 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -54,6 +54,40 @@ Meaning of the flags (field number 3): * **noaddr** No address known for this node. * **noflags** No flags at all. +## Special slot entries + +Normally hash slots associated to a given node are in one of the following formats, +as already explained above: + +1. Single number: 3894 +2. Range: 3900-4000 + +However node hash slots can be in a special state, used in order to communicate errors after a node restart (mismatch between the keys in the AOF/RDB file, and the node hash slots configuration), or when there is a resharding operation in progress. This two states are **importing** and **migrating**. + +The meaning of the two states is explained in the Redis Specification, howoever the gist of the two states is the following: + +* **Importing** slots are yet not part of the nodes hash slot, there is a migration in progress. The node will accept queries about these slots only if the `ASK` command is used. +* **Migrating** slots are assigned to the node, but but are being migrated to some other node. The node will accept queries if all the keys in the command exist already, otherwise it will emit what is called an **ASK redirection**, to force new keys creation directly in the importing node. 
+ +Importing and migrating slots are emitted in the `CLUSTER NODES` output as follows: + +* **Importing slot:** `[slot_number-<-importing_from_node_id]` +* **Migarting slot:** `[slot_number->-migrating_to_node_id]` + +The following are a few examples of importing and migrating slots: + +* `[93-<-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f]` +* `[1002-<-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1]` +* `[77->-e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca]` +* `[16311->-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f]` + +Note that the format does not have any space, so `CLUSTER NODES` output format is plain CSV with space as separator even when this special slots are emitted. However a complete parser for the format should be able to handle them. + +Note that: + +1. Migration and importing slots are only added to the node flagged as **myself**. This information is local to a node, for its own slots. +2. Importing and migrating slots are provided as **additional info**. If the node has a given hash slot assigned, it will be also a plain number in the list of hash slots, so clients that don't have a clue about hash slots migrations can just skip this special fields. + @return @bulk-string-reply: The serialized cluster configuration. From b509c7c2d0f467255ada51ea4fa76e6337d6a0e9 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 22:38:21 +0100 Subject: [PATCH 0183/2314] CLUSTER ADDSLOTS documented. 
--- commands.json | 13 +++++++++ commands/cluster addslots.md | 51 ++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 commands/cluster addslots.md diff --git a/commands.json b/commands.json index d532a427c0..dd47f08084 100644 --- a/commands.json +++ b/commands.json @@ -227,6 +227,19 @@ ], "group": "server" }, + "CLUSTER ADDSLOTS": { + "summary": "Assign new hash slots to receiving node", + "complexity": "O(N) where N is the total number of hash slot arguments", + "arguments": [ + { + "name": "slot", + "type": "integer", + "multiple": true + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER MEET": { "summary": "Force a node cluster to handshake with another node", "complexity": "O(1)", diff --git a/commands/cluster addslots.md b/commands/cluster addslots.md new file mode 100644 index 0000000000..45d91d0b5e --- /dev/null +++ b/commands/cluster addslots.md @@ -0,0 +1,51 @@ +This command is useful in order to modify a node's view of the cluster +configuration. Specifically it assigns a set of hash slots to the node +receiving the command. If the command is successful, the node will map +the specified hash slots to itself, and will start broadcasting the new +configuration. + +However note that: + +1. The command only works if all the specified slots are, from the point of view of the node receiving the command, currently not assigned. A node will refuse to take ownership for slots that already belog to some other node (including itself). +2. The command fails if the same slot is specified multiple times. +3. As a side effect of the command execution, if a slot among the ones specified as argument is set as `importing`, this state gets cleared once the node assigns the (previously unbound) slot to itself. 
+ +## Example + +For example the following command assigns slots 1 2 3 to the node receiving +the command: + + > ADDSLOTS 1 2 3 + OK + +However trying to execute it again results into an error since the slots +are already assigned: + + > ADDSLOTS 1 2 3 + ERR Slot 1 is already busy + +## Usage in Redis Cluster + +This command only works in cluster mode and is useful in the following +Redis Cluster operations: + +1. To create a new cluster ADDSLOTS is used in order to initially setup master nodes splitting the available hash slots among them. +2. In order to fix a broken cluster where certain slots are unassigned. + +## Information about slots propagation and warnings + +Note that once a node assigns a set of slots to itself, it will start +propagating this information in hearthbeats packets headers. However the +other nodes will accept the information only if they have the slot as +not already bound with another node, or if the configuration epoch of the +node advertising the new hash slot, is greater than the node currently listed +in the table. + +This means that this command should be used with care only by applications +orchestrating Redis Cluster, like `redis-trib`, and the command if used +out of the right context can leave the cluster in a wrong state or cause +data loss. + +@return + +@simple-string-reply: `OK` if the command was successful. Otheriwse an error is returned. From 626ff3ba5a693b713da7bad865e536d75597b7b0 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 13 Mar 2015 22:39:49 +0100 Subject: [PATCH 0184/2314] Fix ADDSLOTS example. 
--- commands/cluster addslots.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/cluster addslots.md b/commands/cluster addslots.md index 45d91d0b5e..dee51d6d19 100644 --- a/commands/cluster addslots.md +++ b/commands/cluster addslots.md @@ -15,13 +15,13 @@ However note that: For example the following command assigns slots 1 2 3 to the node receiving the command: - > ADDSLOTS 1 2 3 + > CLUSTER ADDSLOTS 1 2 3 OK However trying to execute it again results into an error since the slots are already assigned: - > ADDSLOTS 1 2 3 + > CLUSTER ADDSLOTS 1 2 3 ERR Slot 1 is already busy ## Usage in Redis Cluster From fc0efa8dfc40b1025c7883a44336e51806287a6e Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Fri, 13 Mar 2015 18:01:47 -0400 Subject: [PATCH 0185/2314] Copy edits for typos in .json files --- commands.json | 2 +- tools.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index dd47f08084..b01c1d7ff7 100644 --- a/commands.json +++ b/commands.json @@ -1436,7 +1436,7 @@ }, "RESTORE": { "summary": "Create a key using the provided serialized value, previously obtained using DUMP.", - "complexity": "O(1) to create the new key and additional O(N*M) to recostruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", + "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", "arguments": [ { "name": "key", diff --git a/tools.json b/tools.json index c56e1d7aab..4640a9d23d 100644 --- a/tools.json +++ b/tools.json @@ -38,7 +38,7 @@ "name": "Kombu", "language": "Python", "repository": "https://github.com/celery/kombu", - "description": "Python AMQP Framework with redis suppport", + "description": "Python AMQP Framework with redis support", "authors": [] }, { From c8d500da1a6c02b0bdbde04b4e068a1bb50e81ed Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Fri, 13 Mar 2015 18:21:51 -0400 Subject: [PATCH 0186/2314] Copy edits for typos --- commands/cluster addslots.md | 6 +++--- commands/cluster meet.md | 2 +- commands/cluster nodes.md | 6 +++--- topics/cluster-tutorial.md | 2 +- topics/sentinel-old.md | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/commands/cluster addslots.md b/commands/cluster addslots.md index dee51d6d19..b93fa16c81 100644 --- a/commands/cluster addslots.md +++ b/commands/cluster addslots.md @@ -6,7 +6,7 @@ configuration. However note that: -1. The command only works if all the specified slots are, from the point of view of the node receiving the command, currently not assigned. A node will refuse to take ownership for slots that already belog to some other node (including itself). +1. The command only works if all the specified slots are, from the point of view of the node receiving the command, currently not assigned. A node will refuse to take ownership for slots that already belong to some other node (including itself). 2. The command fails if the same slot is specified multiple times. 3. As a side effect of the command execution, if a slot among the ones specified as argument is set as `importing`, this state gets cleared once the node assigns the (previously unbound) slot to itself. 
@@ -35,7 +35,7 @@ Redis Cluster operations: ## Information about slots propagation and warnings Note that once a node assigns a set of slots to itself, it will start -propagating this information in hearthbeats packets headers. However the +propagating this information in heartbeat packet headers. However the other nodes will accept the information only if they have the slot as not already bound with another node, or if the configuration epoch of the node advertising the new hash slot, is greater than the node currently listed @@ -48,4 +48,4 @@ data loss. @return -@simple-string-reply: `OK` if the command was successful. Otheriwse an error is returned. +@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. diff --git a/commands/cluster meet.md b/commands/cluster meet.md index aaf574ef6e..c7f53a58b7 100644 --- a/commands/cluster meet.md +++ b/commands/cluster meet.md @@ -12,7 +12,7 @@ composing a Redis Cluster, there are only two ways: 1. The system administrator sends a `CLUSTER MEET` command to force a node to meet another one. 2. An already known node sends a list of nodes in the gossip section that we are not aware of. If the receiving node trusts the sending node as a known node, it will process the gossip section and send an handshake to the nodes that are still not known. -Note that Redis Cluster needs to form a full mesh (each node is connected with each other node), but in order to create a cluster, there is no need to send all the `CLUSTER MEET` commands needed to form the full mesh. What matter is to send enough `CLUSTER MEET` messages so that each node can reach each other node through a *chain of known nodes*. Thanks to the exchange of gossip informations in heartbeat packets, the missing links will be created. 
+Note that Redis Cluster needs to form a full mesh (each node is connected with each other node), but in order to create a cluster, there is no need to send all the `CLUSTER MEET` commands needed to form the full mesh. What matter is to send enough `CLUSTER MEET` messages so that each node can reach each other node through a *chain of known nodes*. Thanks to the exchange of gossip information in heartbeat packets, the missing links will be created. So, if we link node A with node B via `CLUSTER MEET`, and B with C, A and C will find their ways to handshake and create a link. diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 5c31209a30..71d332ca98 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -36,7 +36,7 @@ The meaning of each filed is the following: 1. **id** The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). 2. **ip:port** The node address where clients should contact the node to run queries. 3. **flags** A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in detail in the next section. -4. **master** If the node is a slave, and the master is known, the master node ID, oterwise the "-" character. +4. **master** If the node is a slave, and the master is known, the master node ID, otherwise the "-" character. 5. **ping-sent** Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. 6. **pong-recv** Milliseconds unix time the last pong was received. 7. **config-epoch** The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoc wins. 
@@ -64,7 +64,7 @@ as already explained above: However node hash slots can be in a special state, used in order to communicate errors after a node restart (mismatch between the keys in the AOF/RDB file, and the node hash slots configuration), or when there is a resharding operation in progress. This two states are **importing** and **migrating**. -The meaning of the two states is explained in the Redis Specification, howoever the gist of the two states is the following: +The meaning of the two states is explained in the Redis Specification, however the gist of the two states is the following: * **Importing** slots are yet not part of the nodes hash slot, there is a migration in progress. The node will accept queries about these slots only if the `ASK` command is used. * **Migrating** slots are assigned to the node, but but are being migrated to some other node. The node will accept queries if all the keys in the command exist already, otherwise it will emit what is called an **ASK redirection**, to force new keys creation directly in the importing node. @@ -72,7 +72,7 @@ The meaning of the two states is explained in the Redis Specification, howoever Importing and migrating slots are emitted in the `CLUSTER NODES` output as follows: * **Importing slot:** `[slot_number-<-importing_from_node_id]` -* **Migarting slot:** `[slot_number->-migrating_to_node_id]` +* **Migrating slot:** `[slot_number->-migrating_to_node_id]` The following are a few examples of importing and migrating slots: diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 0a4ed7f00d..7f71e3ad97 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -361,7 +361,7 @@ redis 127.0.0.1:7000> get hello "world" ``` -**Note:** if you craeted the cluster using the script your nodes may listen +**Note:** if you created the cluster using the script your nodes may listen to different ports, starting from 30001 by default. 
The redis-cli cluster support is very basic so it always uses the fact that diff --git a/topics/sentinel-old.md b/topics/sentinel-old.md index a9ce9b8971..f56fcec5ac 100644 --- a/topics/sentinel-old.md +++ b/topics/sentinel-old.md @@ -253,7 +253,7 @@ and is only specified if the instance is not a master itself. * **+failover-state-select-slave** `` -- New failover state is `select-slave`: we are trying to find a suitable slave for promotion. * **no-good-slave** `` -- There is no good slave to promote. Currently we'll try after some time, but probably this will change and the state machine will abort the failover at all in this case. * **selected-slave** `` -- We found the specified good slave to promote. -* **failover-state-send-slaveof-noone** `` -- We are trynig to reconfigure the promoted slave as master, waiting for it to switch. +* **failover-state-send-slaveof-noone** `` -- We are trying to reconfigure the promoted slave as master, waiting for it to switch. * **failover-end-for-timeout** `` -- The failover terminated for timeout. If we are the failover leader, we sent a *best effort* `SLAVEOF` command to all the slaves yet to reconfigure. * **failover-end** `` -- The failover terminated with success. All the slaves appears to be reconfigured to replicate with the new master. * **switch-master** ` ` -- We are starting to monitor the new master, using the same name of the old one. The old master will be completely removed from our tables. 
From e8a95b26813906ce6c8dd26e140ca7ce9066492f Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 13 Mar 2015 23:36:22 +0100 Subject: [PATCH 0187/2314] =?UTF-8?q?hearthbeats=20=E2=86=92=20heartbeats?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- commands/cluster addslots.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster addslots.md b/commands/cluster addslots.md index dee51d6d19..25ee572b06 100644 --- a/commands/cluster addslots.md +++ b/commands/cluster addslots.md @@ -35,7 +35,7 @@ Redis Cluster operations: ## Information about slots propagation and warnings Note that once a node assigns a set of slots to itself, it will start -propagating this information in hearthbeats packets headers. However the +propagating this information in heartbeat packet headers. However the other nodes will accept the information only if they have the slot as not already bound with another node, or if the configuration epoch of the node advertising the new hash slot, is greater than the node currently listed From 81367839fe9b3d17c2a0854fbe6b5e3dc5a634ab Mon Sep 17 00:00:00 2001 From: Pavel Martynov Date: Sun, 15 Mar 2015 13:31:47 +0300 Subject: [PATCH 0188/2314] Fix distribution for allkeys-random policy in lru-cache.md --- topics/lru-cache.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/lru-cache.md b/topics/lru-cache.md index 5ac3e0c405..82cb208d81 100644 --- a/topics/lru-cache.md +++ b/topics/lru-cache.md @@ -60,7 +60,7 @@ using the Redis `INFO` output in order to tune your setup. In general as a rule of thumb: * Use the **allkeys-lru** policy when you expect a power-law distribution in the popularity of your requests, that is, you expect that a subset of elements will be accessed far more often than the rest. **This is a good pick if you are unsure**. 
-* Use the **allkeys-random** if you have a cyclic access where all the keys are scanned continuously, or when you expect the distribution to be normal (all elements likely accessed with the same probability). +* Use the **allkeys-random** if you have a cyclic access where all the keys are scanned continuously, or when you expect the distribution to be uniform (all elements likely accessed with the same probability). * Use the **volatile-ttl** if you want to be able to provide hints to Redis about what are good candidate for expiration by using different TTL values when you create your cache objects. The **allkeys-lru** and **volatile-random** policies are mainly useful when you want to use a single instance for both caching and to have a set of persistent keys. However it is usually a better idea to run two Redis instances to solve such a problem. From 6e52d499a8bf9145ffc564e9edcc734a2c8e8d13 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 16 Mar 2015 10:19:00 +0100 Subject: [PATCH 0189/2314] CLUSTER DELSLOTS documented. 
--- commands.json | 13 +++++++++++++ commands/cluster delslots.md | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 commands/cluster delslots.md diff --git a/commands.json b/commands.json index dd47f08084..08879f3f82 100644 --- a/commands.json +++ b/commands.json @@ -240,6 +240,19 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER DELSLOTS": { + "summary": "Set hash slots as unbound in receiving node", + "complexity": "O(N) where N is the total number of hash slot arguments", + "arguments": [ + { + "name": "slot", + "type": "integer", + "multiple": true + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER MEET": { "summary": "Force a node cluster to handshake with another node", "complexity": "O(1)", diff --git a/commands/cluster delslots.md b/commands/cluster delslots.md new file mode 100644 index 0000000000..00a0727f66 --- /dev/null +++ b/commands/cluster delslots.md @@ -0,0 +1,32 @@ +This command asks a Redis Cluster node to set the hash slots specified as arguments as *not associated* in the node receiving the command. A node associated, or +*unbound* hash slot, means that the node has no idea who is the master currently +serving the hash slot. Moreover hash slots which are not associated will be +associated as soon as we receive an heartbeat packet from some node claiming to +be the owner of the hash slot (moreover, the hash slot will be re-associated if +the node will receive an heartbeat or update message with a configuration +epoch greater than the one of the node currently bound to the hash slot). + +However note that: + +1. The command only works if all the specified slots are already associated with some node. +2. The command fails if the same slot is specified multiple times. +3. As a side effect of the command execution, the node may go into *down* state because not all hash slots are covered. 
+ +## Example + +For example the following command assigns slots 1 2 3 to the node receiving +the command: + + > CLUSTER DELSLOTS 5000 5001 + OK + +## Usage in Redis Cluster + +This command only works in cluster mode and may be useful for debugging +and in order to manually orchestrate a cluster configuration when a new +cluster is created. It is currently not used by `redis-trib`, and mainly +exists for API completeness. + +@return + +@simple-string-reply: `OK` if the command was successful. Otheriwse an error is returned. From 0eb376450fd4ba581380790cacff79f420a4050d Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 16 Mar 2015 11:03:56 +0100 Subject: [PATCH 0190/2314] Two fixes to CLUSTER DELNODE page. --- commands/cluster delslots.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/commands/cluster delslots.md b/commands/cluster delslots.md index 00a0727f66..c0cc7ba13a 100644 --- a/commands/cluster delslots.md +++ b/commands/cluster delslots.md @@ -1,4 +1,4 @@ -This command asks a Redis Cluster node to set the hash slots specified as arguments as *not associated* in the node receiving the command. A node associated, or +This command asks a Redis Cluster node to set the hash slots specified as arguments as *not associated* in the node receiving the command. A not associated, or *unbound* hash slot, means that the node has no idea who is the master currently serving the hash slot. 
Moreover hash slots which are not associated will be associated as soon as we receive an heartbeat packet from some node claiming to @@ -14,8 +14,7 @@ However note that: ## Example -For example the following command assigns slots 1 2 3 to the node receiving -the command: +For example the following command unassigns slots 5000 and 5001 from the node receiving the command: > CLUSTER DELSLOTS 5000 5001 OK From 6fe849e4418e0ec2b08d9d48141c589c2c3a283e Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 16 Mar 2015 11:08:57 +0100 Subject: [PATCH 0191/2314] Otheriwse -> Otherwise. --- commands/cluster delslots.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster delslots.md b/commands/cluster delslots.md index c0cc7ba13a..5c86ca6410 100644 --- a/commands/cluster delslots.md +++ b/commands/cluster delslots.md @@ -28,4 +28,4 @@ exists for API completeness. @return -@simple-string-reply: `OK` if the command was successful. Otheriwse an error is returned. +@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. From 773ee373b05accc653cf0aeddde3d25d8088b236 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 16 Mar 2015 15:04:26 +0100 Subject: [PATCH 0192/2314] CLUSTER SETSLOT documented. 
--- commands.json | 22 ++++++++++ commands/cluster setslot.md | 80 +++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 commands/cluster setslot.md diff --git a/commands.json b/commands.json index 611cbfdcd6..1903cd291c 100644 --- a/commands.json +++ b/commands.json @@ -275,6 +275,28 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER SETSLOT": { + "summary": "Bind an hash slot to a specific node", + "complexity": "O(1)", + "arguments": [ + { + "name": "slot", + "type": "integer" + }, + { + "name": "subcommand", + "type": "enum", + "enum": ["IMPORTING", "MIGRATING", "STABLE", "NODE"] + }, + { + "name": "node-id", + "type": "string", + "optional": true + } + } + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SLOTS": { "summary": "Get array of Cluster slot to node mappings", "complexity": "O(N) where N is the total number of Cluster nodes", diff --git a/commands/cluster setslot.md b/commands/cluster setslot.md new file mode 100644 index 0000000000..53fe74afad --- /dev/null +++ b/commands/cluster setslot.md @@ -0,0 +1,80 @@ +`CLUSTER SETSLOT` is responsible of changing the state of an hash slot in the receiving node in different ways. It can, depending on the subcommand used: + +1. `MIGRATING` subcommand: Set a hash slot in *importing* state. +2. `IMPORTING` subcommnad: Set a hash slot in *migrating* state. +3. `STABLE` subcommand: Clear any importing / migrating state from hash slot. +4. `NODE` subcommand: Bind the hash slot to a different node. + +The command with its set of subcommands is useful in order to start and end cluster live resharding operations, which are accomplished by setting an hash slot in migrating state in the source node, and importing state in the destination node. + +Each subcommand is documented below. At the end you'll find a description of +how live resharding is performed using this command and other related commands. 
+ +# CLUSTER SETSLOT `` MIGRATING `` + +This subcommand sets a slot in *migrating* state. In order to set a slot +in this state, the node receiving teh command must be the hash slot owner, +otherwise an error is returned. + +When a slot is set in mgrating state, the node changes behavior in the +following way: + +1. If a command is received about an existing key, the command is processed as usually. +2. If a command is received about a key that does not exists, an `ASK` redirection is emitted by the node, asking the client to retry only that specific query into `destination-node`. In this case the client should not update its hash slot to node mapping. +3. If the command contains multiple keys, in case non exist, the behavior is the same as point 1, if all exist, is the same as point 2, however if only a partial number of keys exist, the command emits a `TRYAGAIN` error in order for the keys interested to finish being migrated to the target node, so that the multi keys command can be executed. + +# CLUSTER SETSLOT `` IMPORTING `` + +This subcommand is the reverse of `MIGRATING`, and prepares the destination +node to import keys from the specified source node. The command only works if +the node is not already owner of the specified hash slot. + +When a slot is set in importing state, the node changes behavior in the following way: + +1. Commands about this hash slot are refused and a `MOVED` redirection is generated as usually, but in the case the command follows an `ASKING` command, in this case the command is executed. + +In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, send `ASKING`, and immediately after sends the command. This way what happens is that commands about non existing keys in the old node, or keys already migrated to the target node, are executed in the target node, so that: + +1. New keys are always created in the target node. 
During an hash slot migration we'll have to move only old keys, no new ones. +2. Commands abotu keys already migrated are correctly processed in the context of the node which is target of the migration, the new hash slot owner, in order to guarantee consistency. +3. Without `ASKING` the behavior is the same as usually. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. + +# CLUSTER SETSLOT `` STABLE + +This subcommand just clear migrating / importing state from the slot. It is +mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`. +Normally the two states are cleared automatically at the end of the migration +using the `SETSLOT ... NODE ...` subcommand as explained in the next section. + +# CLUSTER SETSLOT `` NODE `` + +The `SETSLOT` subcommand is the one with the most complex semantics. It +associates the hash slot with the specified node, however the command works +only in specified situations and has different side effects depending on the +slot state. The following is the set of pre-conditions and side effects of the +command: + +1. If the current hash slot owner is the node receiving the command, but for effect of the command the slot would be assigned to a different node, the command will return an error if there are still keys for that hash slot in the node receiving the command. +2. If the slot is in *migrating* state, the state gets cleared when the slot is assigned to another node. +3. If the slot was in *importing* state in the node receiving the command, and the command assigns the slot to this node (which happens in the target node at the end of the resharding of an hash slot from one node to another), the command has the following side effects: A) the importing state is cleared. B) If the node config epoch is not already the greatest of the cluster, it generates a new one and assigns the new config epoch to itself. 
This way its new hash slot ownership will win over any past configuration created by previous failovers or slot migraitons. + +It is important to note that in step 3 is the only time when a Redis Cluster node will create a new config epoch without agreement from other nodes. This only happens when a manual configuration is operated. However it is impossible that this creates a non-transient setup where two nodes have the same config epoch, since Redis Cluster uses a config epoch collision resolution algorithm. + +@return + +@simple-string-reply: All the subcommands return `OK` if the command was successful. Otherwise an error is returned. + +# Redis Cluster live resharding explained + +The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node were we want to migrate the `destination` node. + +1. Set the destination node slot in *importing* state using `CLUSTER SETSLOT IMPORTING `. +2. Set the source node slot in *migrating* state using `CLUSTER SETSLOT IMPORTING `. +3. Get keys from the source node with `CLUSTER GETKEYSINSLOT` command and move them into the destination node using the `MIGRATE` command. +4. Use `CLUSTER SETSLOT STABLE` in the source, destination, and all the other nodes. + +Notes: + +* The order of step 1 and 2 is important. We want the destination node to be ready to accept `ASK` redirections when the source node is configured to redirect. 
+* Step 4 does not technically need to use `SETSLOT` in the nodes not involved in the resharding, since the configuration will eventually propagate itself, however it is a good idea to do so in order to stop ASAP nodes from pointing to the wrong node, for the hash slot moved, resulting in more redirections than needed to eventually find the right node. + From edb5a03f42e32f7b36cc0dcf7ceda052d1af975c Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 16 Mar 2015 15:05:40 +0100 Subject: [PATCH 0193/2314] JSON fix. --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 1903cd291c..570fbb3e41 100644 --- a/commands.json +++ b/commands.json @@ -293,7 +293,7 @@ "type": "string", "optional": true } - } + ], "since": "3.0.0", "group": "cluster" }, From 319622d665824cdf338a60d099a2cad98ac72f52 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 16 Mar 2015 15:06:34 +0100 Subject: [PATCH 0194/2314] CLUSTER SETSLOT: huge titles are huge. --- commands/cluster setslot.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/commands/cluster setslot.md b/commands/cluster setslot.md index 53fe74afad..8d3b2d44a8 100644 --- a/commands/cluster setslot.md +++ b/commands/cluster setslot.md @@ -10,7 +10,7 @@ The command with its set of subcommands is useful in order to start and end clus Each subcommand is documented below. At the end you'll find a description of how live resharding is performed using this command and other related commands. -# CLUSTER SETSLOT `` MIGRATING `` +## CLUSTER SETSLOT `` MIGRATING `` This subcommand sets a slot in *migrating* state. In order to set a slot in this state, the node receiving teh command must be the hash slot owner, @@ -23,7 +23,7 @@ following way: 2. If a command is received about a key that does not exists, an `ASK` redirection is emitted by the node, asking the client to retry only that specific query into `destination-node`. 
In this case the client should not update its hash slot to node mapping. 3. If the command contains multiple keys, in case non exist, the behavior is the same as point 1, if all exist, is the same as point 2, however if only a partial number of keys exist, the command emits a `TRYAGAIN` error in order for the keys interested to finish being migrated to the target node, so that the multi keys command can be executed. -# CLUSTER SETSLOT `` IMPORTING `` +## CLUSTER SETSLOT `` IMPORTING `` This subcommand is the reverse of `MIGRATING`, and prepares the destination node to import keys from the specified source node. The command only works if @@ -39,14 +39,14 @@ In this way when a node in migrating state generates an `ASK` redirection, the c 2. Commands abotu keys already migrated are correctly processed in the context of the node which is target of the migration, the new hash slot owner, in order to guarantee consistency. 3. Without `ASKING` the behavior is the same as usually. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. -# CLUSTER SETSLOT `` STABLE +## CLUSTER SETSLOT `` STABLE This subcommand just clear migrating / importing state from the slot. It is mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`. Normally the two states are cleared automatically at the end of the migration using the `SETSLOT ... NODE ...` subcommand as explained in the next section. -# CLUSTER SETSLOT `` NODE `` +## CLUSTER SETSLOT `` NODE `` The `SETSLOT` subcommand is the one with the most complex semantics. It associates the hash slot with the specified node, however the command works @@ -64,7 +64,7 @@ It is important to note that in step 3 is the only time when a Redis Cluster nod @simple-string-reply: All the subcommands return `OK` if the command was successful. Otherwise an error is returned. 
-# Redis Cluster live resharding explained +## Redis Cluster live resharding explained The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node were we want to migrate the `destination` node. From 3b8ce80fc5212f74b013d067216ab86ad8062a35 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 17 Mar 2015 10:26:45 +0100 Subject: [PATCH 0195/2314] Fixed typos and reordered some sentences to make them more clear --- commands/cluster nodes.md | 2 +- commands/cluster setslot.md | 35 +++++++++++++++++------------------ 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 71d332ca98..11bb0d1a01 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -39,7 +39,7 @@ The meaning of each filed is the following: 4. **master** If the node is a slave, and the master is known, the master node ID, otherwise the "-" character. 5. **ping-sent** Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. 6. **pong-recv** Milliseconds unix time the last pong was received. -7. **config-epoch** The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoc wins. +7. **config-epoch** The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. 
If multiple nodes claim to serve the same hash slots, the one with higher configuration epoch wins. 8. **link-state** The state of the link used for the node-to-node cluster bus. We use this link to communicate with the node. Can be `connected` or `disconnected`. 9. **slot** An hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. diff --git a/commands/cluster setslot.md b/commands/cluster setslot.md index 8d3b2d44a8..038bed6288 100644 --- a/commands/cluster setslot.md +++ b/commands/cluster setslot.md @@ -1,7 +1,7 @@ `CLUSTER SETSLOT` is responsible of changing the state of an hash slot in the receiving node in different ways. It can, depending on the subcommand used: 1. `MIGRATING` subcommand: Set a hash slot in *importing* state. -2. `IMPORTING` subcommnad: Set a hash slot in *migrating* state. +2. `IMPORTING` subcommand: Set a hash slot in *migrating* state. 3. `STABLE` subcommand: Clear any importing / migrating state from hash slot. 4. `NODE` subcommand: Bind the hash slot to a different node. @@ -12,16 +12,16 @@ how live resharding is performed using this command and other related commands. ## CLUSTER SETSLOT `` MIGRATING `` -This subcommand sets a slot in *migrating* state. In order to set a slot -in this state, the node receiving teh command must be the hash slot owner, +This subcommand sets a slot to *migrating* state. In order to set a slot +in this state, the node receiving the command must be the hash slot owner, otherwise an error is returned. -When a slot is set in mgrating state, the node changes behavior in the +When a slot is set in migrating state, the node changes behavior in the following way: 1. 
If a command is received about an existing key, the command is processed as usually. 2. If a command is received about a key that does not exists, an `ASK` redirection is emitted by the node, asking the client to retry only that specific query into `destination-node`. In this case the client should not update its hash slot to node mapping. -3. If the command contains multiple keys, in case non exist, the behavior is the same as point 1, if all exist, is the same as point 2, however if only a partial number of keys exist, the command emits a `TRYAGAIN` error in order for the keys interested to finish being migrated to the target node, so that the multi keys command can be executed. +3. If the command contains multiple keys, in case none exist, the behavior is the same as point 2, if all exist, it is the same as point 1, however if only a partial number of keys exist, the command emits a `TRYAGAIN` error in order for the keys interested to finish being migrated to the target node, so that the multi keys command can be executed. ## CLUSTER SETSLOT `` IMPORTING `` @@ -33,32 +33,32 @@ When a slot is set in importing state, the node changes behavior in the followin 1. Commands about this hash slot are refused and a `MOVED` redirection is generated as usually, but in the case the command follows an `ASKING` command, in this case the command is executed. -In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, send `ASKING`, and immediately after sends the command. This way what happens is that commands about non existing keys in the old node, or keys already migrated to the target node, are executed in the target node, so that: +In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, sends `ASKING`, and immediately after sends the command. 
This way commands about non-existing keys in the old node or keys already migrated to the target node are executed in the target node, so that: -1. New keys are always created in the target node. During an hash slot migration we'll have to move only old keys, no new ones. -2. Commands abotu keys already migrated are correctly processed in the context of the node which is target of the migration, the new hash slot owner, in order to guarantee consistency. +1. New keys are always created in the target node. During an hash slot migration we'll have to move only old keys, not new ones. +2. Commands about keys already migrated are correctly processed in the context of the node which is the target of the migration, the new hash slot owner, in order to guarantee consistency. 3. Without `ASKING` the behavior is the same as usually. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. ## CLUSTER SETSLOT `` STABLE -This subcommand just clear migrating / importing state from the slot. It is +This subcommand just clears migrating / importing state from the slot. It is mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`. Normally the two states are cleared automatically at the end of the migration using the `SETSLOT ... NODE ...` subcommand as explained in the next section. ## CLUSTER SETSLOT `` NODE `` -The `SETSLOT` subcommand is the one with the most complex semantics. It +The `NODE` subcommand is the one with the most complex semantics. It associates the hash slot with the specified node, however the command works -only in specified situations and has different side effects depending on the +only in specific situations and has different side effects depending on the slot state. The following is the set of pre-conditions and side effects of the command: 1. 
If the current hash slot owner is the node receiving the command, but for effect of the command the slot would be assigned to a different node, the command will return an error if there are still keys for that hash slot in the node receiving the command. 2. If the slot is in *migrating* state, the state gets cleared when the slot is assigned to another node. -3. If the slot was in *importing* state in the node receiving the command, and the command assigns the slot to this node (which happens in the target node at the end of the resharding of an hash slot from one node to another), the command has the following side effects: A) the importing state is cleared. B) If the node config epoch is not already the greatest of the cluster, it generates a new one and assigns the new config epoch to itself. This way its new hash slot ownership will win over any past configuration created by previous failovers or slot migraitons. +3. If the slot was in *importing* state in the node receiving the command, and the command assigns the slot to this node (which happens in the target node at the end of the resharding of an hash slot from one node to another), the command has the following side effects: A) the *importing* state is cleared. B) If the node config epoch is not already the greatest of the cluster, it generates a new one and assigns the new config epoch to itself. This way its new hash slot ownership will win over any past configuration created by previous failovers or slot migrations. -It is important to note that in step 3 is the only time when a Redis Cluster node will create a new config epoch without agreement from other nodes. This only happens when a manual configuration is operated. However it is impossible that this creates a non-transient setup where two nodes have the same config epoch, since Redis Cluster uses a config epoch collision resolution algorithm. 
+It is important to note that step 3 is the only time when a Redis Cluster node will create a new config epoch without agreement from other nodes. This only happens when a manual configuration is operated. However it is impossible that this creates a non-transient setup where two nodes have the same config epoch, since Redis Cluster uses a config epoch collision resolution algorithm. @return @@ -66,15 +66,14 @@ It is important to note that in step 3 is the only time when a Redis Cluster nod ## Redis Cluster live resharding explained -The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node were we want to migrate the `destination` node. +The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node where we want to migrate the `destination` node. -1. Set the destination node slot in *importing* state using `CLUSTER SETSLOT IMPORTING `. -2. Set the source node slot in *migrating* state using `CLUSTER SETSLOT IMPORTING `. +1. Set the destination node slot to *importing* state using `CLUSTER SETSLOT IMPORTING `. +2. Set the source node slot to *migrating* state using `CLUSTER SETSLOT MIGRATING `. 3. Get keys from the source node with `CLUSTER GETKEYSINSLOT` command and move them into the destination node using the `MIGRATE` command. 4. Use `CLUSTER SETSLOT STABLE` in the source, destination, and all the other nodes. Notes: * The order of step 1 and 2 is important. 
We want the destination node to be ready to accept `ASK` redirections when the source node is configured to redirect. -* Step 4 does not technically need to use `SETSLOT` in the nodes not involved in the resharding, since the configuration will eventually propagate itself, however it is a good idea to do so in order to stop ASAP nodes from pointing to the wrong node, for the hash slot moved, resulting in more redirections than needed to eventually find the right node. - +* Step 4 does not technically need to use `SETSLOT` in the nodes not involved in the resharding, since the configuration will eventually propagate itself, however it is a good idea to do so in order to stop nodes from pointing to the wrong node for the hash slot moved as soon as possible, resulting in less redirections to find the right node. From 03054b1718ddd1b51b1b38234f14f5cbb7138387 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 12:32:21 +0100 Subject: [PATCH 0196/2314] CLUSTER INFO documentation. --- commands.json | 6 ++++++ commands/cluster info.md | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 commands/cluster info.md diff --git a/commands.json b/commands.json index 570fbb3e41..076e13e01e 100644 --- a/commands.json +++ b/commands.json @@ -253,6 +253,12 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER INFO": { + "summary": "Provides info about Redis Cluster node state", + "complexity": "O(1)", + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER MEET": { "summary": "Force a node cluster to handshake with another node", "complexity": "O(1)", diff --git a/commands/cluster info.md b/commands/cluster info.md new file mode 100644 index 0000000000..b7fa033552 --- /dev/null +++ b/commands/cluster info.md @@ -0,0 +1,35 @@ +`CLUSTER INFO` provides `INFO` style information about Redis Cluster +vital parameters. The following is a sample output, followed by the +description of each field reported. 
+ +``` +cluster_state:ok +cluster_slots_assigned:16384 +cluster_slots_ok:16384 +cluster_slots_pfail:0 +cluster_slots_fail:0 +cluster_known_nodes:6 +cluster_size:3 +cluster_current_epoch:6 +cluster_my_epoch:2 +cluster_stats_messages_sent:1483972 +cluster_stats_messages_received:1483968 +``` + +* **cluster_state**: State is `ok` if the node is able to receive queries. `fail` if there is at least one hash slot which is unbound (no node associated), in error state (node serving it is flagged with FAIL flag), or if the majority of masters can't be reached by this node. +* **cluster_slots_assigned**: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. +* **cluster_slots_ok**: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. +* **cluster_slots_pfail**: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. +* **cluster_slots_fail**: Number of hash slots in mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. +* **cluster_known_nodes**: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. +* **cluster_size**: The number of master nodes serving at least one hash slot in the cluster. +* **cluster_current_epoch**: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. +* **cluster_my_epoch**: The `Config Epoch` of the node we are talking with. This is the current configuration version assigned to this node. 
+* **cluster_stats_messages_sent**: Number of messages sent via the cluster node-to-node binary bus. +* **cluster_stats_messages_received**: Number of messages received via the cluster node-to-node binary bus. + +More information about the Current Epoch and Config Epoch variables are available in the Redis Cluster specification document. + +@return + +@bulk-string-reply: A map between named fields and values in the form of `:` lines separated by newlines compoesd by the two bytes `CRLF`. From 332fa98b7ee4f20a9ce0ad85c3d51bee4f448a50 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 12:34:37 +0100 Subject: [PATCH 0197/2314] Markdown fixes. --- commands/cluster info.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/commands/cluster info.md b/commands/cluster info.md index b7fa033552..b51e614430 100644 --- a/commands/cluster info.md +++ b/commands/cluster info.md @@ -16,17 +16,17 @@ cluster_stats_messages_sent:1483972 cluster_stats_messages_received:1483968 ``` -* **cluster_state**: State is `ok` if the node is able to receive queries. `fail` if there is at least one hash slot which is unbound (no node associated), in error state (node serving it is flagged with FAIL flag), or if the majority of masters can't be reached by this node. -* **cluster_slots_assigned**: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. -* **cluster_slots_ok**: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. -* **cluster_slots_pfail**: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. 
-* **cluster_slots_fail**: Number of hash slots in mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. -* **cluster_known_nodes**: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. -* **cluster_size**: The number of master nodes serving at least one hash slot in the cluster. -* **cluster_current_epoch**: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. -* **cluster_my_epoch**: The `Config Epoch` of the node we are talking with. This is the current configuration version assigned to this node. -* **cluster_stats_messages_sent**: Number of messages sent via the cluster node-to-node binary bus. -* **cluster_stats_messages_received**: Number of messages received via the cluster node-to-node binary bus. +* **`cluster_state`**: State is `ok` if the node is able to receive queries. `fail` if there is at least one hash slot which is unbound (no node associated), in error state (node serving it is flagged with FAIL flag), or if the majority of masters can't be reached by this node. +* **`cluster_slots_assigned`**: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. +* **`cluster_slots_ok`**: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. +* **`cluster_slots_pfail`**: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. 
+* **`cluster_slots_fail`**: Number of hash slots in mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. +* **`cluster_known_nodes`**: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. +* **`cluster_size`**: The number of master nodes serving at least one hash slot in the cluster. +* **`cluster_current_epoch`**: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. +* **`cluster_my_epoch`**: The `Config Epoch` of the node we are talking with. This is the current configuration version assigned to this node. +* **`cluster_stats_messages_sent`**: Number of messages sent via the cluster node-to-node binary bus. +* **`cluster_stats_messages_received`**: Number of messages received via the cluster node-to-node binary bus. More information about the Current Epoch and Config Epoch variables are available in the Redis Cluster specification document. From 2d3d7f1577a500d6e3a427ff999f51413a2fe004 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 12:36:40 +0100 Subject: [PATCH 0198/2314] More markdown fixes. --- commands/cluster info.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/commands/cluster info.md b/commands/cluster info.md index b51e614430..51ccfbf94f 100644 --- a/commands/cluster info.md +++ b/commands/cluster info.md @@ -16,17 +16,17 @@ cluster_stats_messages_sent:1483972 cluster_stats_messages_received:1483968 ``` -* **`cluster_state`**: State is `ok` if the node is able to receive queries. `fail` if there is at least one hash slot which is unbound (no node associated), in error state (node serving it is flagged with FAIL flag), or if the majority of masters can't be reached by this node. 
-* **`cluster_slots_assigned`**: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. -* **`cluster_slots_ok`**: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. -* **`cluster_slots_pfail`**: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. -* **`cluster_slots_fail`**: Number of hash slots in mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. -* **`cluster_known_nodes`**: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. -* **`cluster_size`**: The number of master nodes serving at least one hash slot in the cluster. -* **`cluster_current_epoch`**: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. -* **`cluster_my_epoch`**: The `Config Epoch` of the node we are talking with. This is the current configuration version assigned to this node. -* **`cluster_stats_messages_sent`**: Number of messages sent via the cluster node-to-node binary bus. -* **`cluster_stats_messages_received`**: Number of messages received via the cluster node-to-node binary bus. +* `cluster_state`: State is `ok` if the node is able to receive queries. `fail` if there is at least one hash slot which is unbound (no node associated), in error state (node serving it is flagged with FAIL flag), or if the majority of masters can't be reached by this node. 
+* `cluster_slots_assigned`: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. +* `cluster_slots_ok`: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. +* `cluster_slots_pfail`: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. +* `cluster_slots_fail`: Number of hash slots in mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. +* `cluster_known_nodes`: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. +* `cluster_size`: The number of master nodes serving at least one hash slot in the cluster. +* `cluster_current_epoch`: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. +* `cluster_my_epoch`: The `Config Epoch` of the node we are talking with. This is the current configuration version assigned to this node. +* `cluster_stats_messages_sent`: Number of messages sent via the cluster node-to-node binary bus. +* `cluster_stats_messages_received`: Number of messages received via the cluster node-to-node binary bus. More information about the Current Epoch and Config Epoch variables are available in the Redis Cluster specification document. From f2714affb199ecfddf7665f885484f11d5f018aa Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 12:49:27 +0100 Subject: [PATCH 0199/2314] CLUSTER KEYSLOT documented. 
--- commands.json | 12 ++++++++++++ commands/cluster keyslot.md | 27 +++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 commands/cluster keyslot.md diff --git a/commands.json b/commands.json index 076e13e01e..b8ebc79ccc 100644 --- a/commands.json +++ b/commands.json @@ -259,6 +259,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER KEYSLOT": { + "summary": "Returns the hash slot of the specified key", + "complexity": "O(N) where N is the number of bytes in the key", + "arguments": [ + { + "name": "key", + "type": "string" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER MEET": { "summary": "Force a node cluster to handshake with another node", "complexity": "O(1)", diff --git a/commands/cluster keyslot.md b/commands/cluster keyslot.md new file mode 100644 index 0000000000..829b9c3a40 --- /dev/null +++ b/commands/cluster keyslot.md @@ -0,0 +1,27 @@ +Returns an integer identifying the hash slot the specified key hashes to. +This command is mainly useful for debugging and testing, since it exposes +via an API the underlying Redis implementation of the hashing algorithm. +Example use cases for this command: + +1. Client libs may use Redis in order to test their own hashing algorithm generating random keys and hashing with their local implementation and using a Redis server, and checking if the computed hash slot is the same. +2. Humans may use the command in order to check what is the hash slot, and then the instance, responsible for a given key. + +## Example + +``` +> CLUSTER KEYSLOT somekey +11058 +> CLUSTER KEYSLOT foo{hash_tag} +(integer) 2515 +> CLUSTER KEYSLOT bar{hash_tag} +(integer) 2515 +``` + +Note that the command implements the full hashing algorithm, including support +for **hash tags**, that is the special property of hashing just what is +between `{` and `}` if such a pattern is found inside the key name, in order +to force multiple keys to be handled by the same node. 
+ +@return + +@integer-reply: The hash slot number. From b89fd641d45087decd559910c12f5d178a3caea5 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 12:52:24 +0100 Subject: [PATCH 0200/2314] Better grammar for KEYSLOT. --- commands/cluster keyslot.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/commands/cluster keyslot.md b/commands/cluster keyslot.md index 829b9c3a40..3fd711e793 100644 --- a/commands/cluster keyslot.md +++ b/commands/cluster keyslot.md @@ -3,8 +3,8 @@ This command is mainly useful for debugging and testing, since it exposes via an API the underlying Redis implementation of the hashing algorithm. Example use cases for this command: -1. Client libs may use Redis in order to test their own hashing algorithm generating random keys and hashing with their local implementation and using a Redis server, and checking if the computed hash slot is the same. -2. Humans may use the command in order to check what is the hash slot, and then the instance, responsible for a given key. +1. Client libraries may use Redis in order to test their own hashing algorithm, generating random keys and hashing them with both their local implementation and using Redis `CLUSTER KEYSLOT` command, then checking if the result is the same. +2. Humans may use this command in order to check what is the hash slot, and then the associated Redis Cluster node, responsible for a given key. ## Example @@ -17,10 +17,7 @@ Example use cases for this command: (integer) 2515 ``` -Note that the command implements the full hashing algorithm, including support -for **hash tags**, that is the special property of hashing just what is -between `{` and `}` if such a pattern is found inside the key name, in order -to force multiple keys to be handled by the same node. 
+Note that the command implements the full hashing algorithm, including support for **hash tags**, that is the special property of Redis Cluster key hashing algorithm, of hashing just what is between `{` and `}` if such a pattern is found inside the key name, in order to force multiple keys to be handled by the same node. @return From 884918100cf37a4518d8d76c0e8108b60bcb6490 Mon Sep 17 00:00:00 2001 From: Michel Martens Date: Tue, 17 Mar 2015 12:07:52 +0000 Subject: [PATCH 0201/2314] CLUSTER DELSLOTS modified. --- commands/cluster delslots.md | 53 ++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/commands/cluster delslots.md b/commands/cluster delslots.md index 5c86ca6410..2e5339b7c3 100644 --- a/commands/cluster delslots.md +++ b/commands/cluster delslots.md @@ -1,31 +1,48 @@ -This command asks a Redis Cluster node to set the hash slots specified as arguments as *not associated* in the node receiving the command. A not associated, or -*unbound* hash slot, means that the node has no idea who is the master currently -serving the hash slot. Moreover hash slots which are not associated will be -associated as soon as we receive an heartbeat packet from some node claiming to -be the owner of the hash slot (moreover, the hash slot will be re-associated if -the node will receive an heartbeat or update message with a configuration -epoch greater than the one of the node currently bound to the hash slot). - -However note that: - -1. The command only works if all the specified slots are already associated with some node. +In Redis Cluster, each node keeps track of which master is serving +a particular hash slot. + +The `DELSLOTS` command asks a particular Redis Cluster node to +forget which master is serving the hash slots specified as arguments. + +In the context of a node that has received a `DELSLOTS` command and +has consequently removed the associations for the passed hash slots, +we say those hash slots are *unbound*. 
Note that the existence of +unbound hashs slots occurs naturally when a node has not been +configured to handle them (something that can be done with the +`ADDSLOTS` command) and if it has not received any information about +who owns those hash slots (something that it can learn from heartbeat +or update messages). + +If a node with unbound hash slots receives a heartbeat packet from +another node that claims to be the owner of some of those hash +slots, the association is established instantly. Moreover, if a +heartbeat or update message is received with a configuration epoch +greater than the node's own, the association is re-established. + +However, note that: + +1. The command only works if all the specified slots are already +associated with some node. 2. The command fails if the same slot is specified multiple times. -3. As a side effect of the command execution, the node may go into *down* state because not all hash slots are covered. +3. As a side effect of the command execution, the node may go into +*down* state because not all hash slots are covered. ## Example -For example the following command unassigns slots 5000 and 5001 from the node receiving the command: +The following command removes the association for slots 5000 and +5001 from the node receiving the command: > CLUSTER DELSLOTS 5000 5001 OK ## Usage in Redis Cluster -This command only works in cluster mode and may be useful for debugging -and in order to manually orchestrate a cluster configuration when a new -cluster is created. It is currently not used by `redis-trib`, and mainly -exists for API completeness. +This command only works in cluster mode and may be useful for +debugging and in order to manually orchestrate a cluster configuration +when a new cluster is created. It is currently not used by `redis-trib`, +and mainly exists for API completeness. @return -@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. 
+@simple-string-reply: `OK` if the command was successful. Otherwise +an error is returned. From 16dc2e1707080157f48fe792961b956509471e68 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 15:59:10 +0100 Subject: [PATCH 0202/2314] CLUSTER SAVECONFIG documented. --- commands.json | 6 ++++++ commands/cluster saveconfig.md | 15 +++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 commands/cluster saveconfig.md diff --git a/commands.json b/commands.json index b8ebc79ccc..872079fab2 100644 --- a/commands.json +++ b/commands.json @@ -293,6 +293,12 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER SAVECONFIG": { + "summary": "Forces the node to save cluster state on disk", + "complexity": "O(1)", + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SETSLOT": { "summary": "Bind an hash slot to a specific node", "complexity": "O(1)", diff --git a/commands/cluster saveconfig.md b/commands/cluster saveconfig.md new file mode 100644 index 0000000000..31308c2028 --- /dev/null +++ b/commands/cluster saveconfig.md @@ -0,0 +1,15 @@ +Forces a node to save the `nodes.conf` configuration on disk. Before to return +the command calls `fsync(2)` in order to make sure the configuration is +flushed on the computer disk. + +This command is mainly used in the event a `nodes.conf` node state file +gets lost / deleted for some reason, and we want to generate it again from +scratch. It can also be useful in case of mundane alterations of a node cluster +configuration via the `CLUSTER` command in order to ensure the new configuration +is persisted on disk, however all the commands should normally be able to +auto schedule to persist the configuration on disk when it is important +to do so for the correctness of the system in the event of a restart. + +@return + +@simple-string-reply: `OK` or an error if the operation fails. 
From 7c7ec2c07fa170b51bb0863a3f4bdd9ba8dc0703 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 16:34:13 +0100 Subject: [PATCH 0203/2314] CLUSTER COUNTKEYSINSLOT documented. --- commands.json | 12 ++++++++++++ commands/cluster countkeysinslot.md | 13 +++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 commands/cluster countkeysinslot.md diff --git a/commands.json b/commands.json index 872079fab2..1098fbe24b 100644 --- a/commands.json +++ b/commands.json @@ -240,6 +240,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER COUNTKEYSINSLOT": { + "summary": "Return the number of local keys in the specified hash slot", + "complexity": "O(1)", + "arguments": [ + { + "name": "slot", + "type": "integer" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER DELSLOTS": { "summary": "Set hash slots as unbound in receiving node", "complexity": "O(N) where N is the total number of hash slot arguments", diff --git a/commands/cluster countkeysinslot.md b/commands/cluster countkeysinslot.md new file mode 100644 index 0000000000..0bffec84b5 --- /dev/null +++ b/commands/cluster countkeysinslot.md @@ -0,0 +1,13 @@ +Returns the number of keys in the specified Redis Cluster hash slot. The +command only queries the local data set, so contacting a node +that is not serving the specified hash slot will always result in a count of +zero being returned. + +``` +> CLUSTER COUNTKEYSINSLOT 7000 +(integer) 50341 +``` + +@return + +@integer-reply: The number of keys in the specified hash slot, or an error if the hash slot is invalid. From 0270d71800b21ebec991612efb50b124fcf7ebe8 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 16:41:16 +0100 Subject: [PATCH 0204/2314] CLUSTER GETKEYSINSLOT documented. 
--- commands.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/commands.json b/commands.json index 1098fbe24b..c823728de3 100644 --- a/commands.json +++ b/commands.json @@ -265,6 +265,22 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER GETKEYSINSLOT": { + "summary": "Return local key names in the specified hash slot", + "complexity": "O(1)", + "arguments": [ + { + "name": "slot", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER INFO": { "summary": "Provides info about Redis Cluster node state", "complexity": "O(1)", From 63f8c6803b6b2e615bc42b51451255ca5ee1b26c Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 17 Mar 2015 16:45:12 +0100 Subject: [PATCH 0205/2314] Missing file added. --- commands.json | 2 +- commands/cluster getkeysinslot.md | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 commands/cluster getkeysinslot.md diff --git a/commands.json b/commands.json index c823728de3..23b963b741 100644 --- a/commands.json +++ b/commands.json @@ -267,7 +267,7 @@ }, "CLUSTER GETKEYSINSLOT": { "summary": "Return local key names in the specified hash slot", - "complexity": "O(1)", + "complexity": "O(log(N)) where N is the number of requested keys", "arguments": [ { "name": "slot", diff --git a/commands/cluster getkeysinslot.md b/commands/cluster getkeysinslot.md new file mode 100644 index 0000000000..4a0f6b7fd9 --- /dev/null +++ b/commands/cluster getkeysinslot.md @@ -0,0 +1,20 @@ +The command returns an array of keys names stored in the contacted node and +hashing to the specified hash slot. The maximum number of keys to return +is specified via the `count` argument, so that it is possible for the user +of this API to batch-processing keys. + +The main usage of this command is during rehashing of cluster slots from one +node to another. 
The way the rehashing is performed is exposed in the Redis +Cluster specification, or in a more simple to digest form, as an appendix +of the `CLUSTER SETSLOT` command documentation. + +``` +> CLUSTER GETKEYSINSLOT 7000 3 +"47344|273766|70329104160040|key_39015" +"47344|273766|70329104160040|key_89793" +"47344|273766|70329104160040|key_92937" +``` + +@return + +@array-reply: From 0 to *count* key names in a Redis array reply. From a7e072ce85d863f6f119b836bcf20a7d6021bb4a Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 18 Mar 2015 12:03:27 +0100 Subject: [PATCH 0206/2314] CLUSTER FORGET documented. --- commands.json | 12 ++++++++++ commands/cluster forget.md | 47 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 commands/cluster forget.md diff --git a/commands.json b/commands.json index 23b963b741..89b46c7c00 100644 --- a/commands.json +++ b/commands.json @@ -265,6 +265,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER FORGET": { + "summary": "Remove a node from the nodes table", + "complexity": "O(1)", + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER GETKEYSINSLOT": { "summary": "Return local key names in the specified hash slot", "complexity": "O(log(N)) where N is the number of requested keys", diff --git a/commands/cluster forget.md b/commands/cluster forget.md new file mode 100644 index 0000000000..95c82de0f7 --- /dev/null +++ b/commands/cluster forget.md @@ -0,0 +1,47 @@ +The command is used in order to remove the node, specified via its node ID, +from the set of nodes known by the Redis Cluster node receiving the command. +In other words the specified node is removed from the *nodes table* of the +node receiving the command. 
+ +However the command cannot simply drop the node from its internal configuration, +it also implements a ban-list, not allowing the same node to be added again +as a side effect of processing the *gossip section* of the heartbeat packets +received from other nodes. + +## Details on the command behavior + +For example, let's assume we have four nodes, A, B, C and D. In order to +end with just a three nodes cluster A, B, C we may follow these steps: + +1. Reshard all the hash slots from D to nodes A, B, C. +2. D is now empty, but still listed in the nodes table of A, B and C. +3. We contact A, and send `CLUSTER FORGET D`. +4. B sends A a heartbeat packet, where node D is listed. +5. A does no longer known node D (see step 3), so it starts an handshake with D. +6. D ends re-added in the nodes table of A. + +As you can say in this way removing a node is fragile, we need to send +`CLUSTER FORGET` commands to all the nodes ASAP hoping there are no +gossip sections processing in the meantime. Because of this problem the +command implements a ban-list with an expire time for each entry. + +So what the command really does is: + +1. The specified node gets removed from the nodes table. +2. The node ID of the removed node gets added to the ban-list, for 1 minute. +3. The node will skip all the node IDs listed in the ban-list when processing gossip sections received in heartbeat packets from other nodes. + +This way we have a 60 second window to inform all the nodes in the cluster that +we want to remove a node. + +## Special conditions not allowing the command execution + +The command does not succeed and returns an error in the following cases: + +1. The specified node ID is not found in the nodes table. +2. The node receiving the command is a salve, and the specified node ID identifies its current master. +3. The node ID identifies the same node we are sending the command to. 
+ +@return + +@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. From e14d378167e4b9bdbbe0487cc3035cc7494df253 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 18 Mar 2015 12:15:46 +0100 Subject: [PATCH 0207/2314] CLUSTER REPLICATE documented. --- commands.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/commands.json b/commands.json index 89b46c7c00..af4d147213 100644 --- a/commands.json +++ b/commands.json @@ -333,6 +333,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER REPLICATE": { + "summary": "Reconfigure a node as a slave of the specified master node", + "complexity": "O(1)", + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SAVECONFIG": { "summary": "Forces the node to save cluster state on disk", "complexity": "O(1)", From 57a3311a7c210274e5afd0cc2d8ed8e438ec6b59 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 18 Mar 2015 12:16:44 +0100 Subject: [PATCH 0208/2314] Actual markdown file added for cluster replicate. --- commands/cluster replicate.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 commands/cluster replicate.md diff --git a/commands/cluster replicate.md b/commands/cluster replicate.md new file mode 100644 index 0000000000..555634acd6 --- /dev/null +++ b/commands/cluster replicate.md @@ -0,0 +1,26 @@ +The command reconfigures a node as a slave of the specified master. +If the node receiving the command is an *empty master*, as a side effect +of the command, the node role is changed from master to slave. + +Once a node is turned into the slave of another master node, there is no need +to inform the other cluster nodes about the change: heartbeat packets exchanged +between nodes will propagate the new configuration automatically. + +A slave will always accept the command, assuming that: + +1. The specified node ID exists in its nodes table. +2. 
The specified node ID does not identify the instance we are sending the command to. +3. The specified node ID is a master. + +If the node receiving the command is not already a slave, but is a master, +the command will only succeed, and the node will be converted into a slave, +only if the following additional conditions are met: + +1. The node is not serving any hash slots. +2. The node is empty, no keys are stored at all in the key space. + +If the command succeeds the new slave will immediately try to contact its master in order to replicate from it. + +@return + +@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. From c97fe778084c7375fc93c71451649093aad79a2b Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 18 Mar 2015 12:19:51 +0100 Subject: [PATCH 0209/2314] Typos --- commands/cluster forget.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/cluster forget.md b/commands/cluster forget.md index 95c82de0f7..cd356ee7c9 100644 --- a/commands/cluster forget.md +++ b/commands/cluster forget.md @@ -20,7 +20,7 @@ end with just a three nodes cluster A, B, C we may follow these steps: 5. A does no longer known node D (see step 3), so it starts an handshake with D. 6. D ends re-added in the nodes table of A. -As you can say in this way removing a node is fragile, we need to send +As you can see in this way removing a node is fragile, we need to send `CLUSTER FORGET` commands to all the nodes ASAP hoping there are no gossip sections processing in the meantime. Because of this problem the command implements a ban-list with an expire time for each entry. @@ -39,7 +39,7 @@ we want to remove a node. The command does not succeed and returns an error in the following cases: 1. The specified node ID is not found in the nodes table. -2. The node receiving the command is a salve, and the specified node ID identifies its current master. +2. 
The node receiving the command is a slave, and the specified node ID identifies its current master. 3. The node ID identifies the same node we are sending the command to. @return From 62200e372b41e0c19eb9dbac38e85b13a3f9612e Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 18 Mar 2015 12:29:26 +0100 Subject: [PATCH 0210/2314] CLUSTER SLAVES documented. --- commands.json | 12 ++++++++++++ commands/cluster slaves.md | 15 +++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 commands/cluster slaves.md diff --git a/commands.json b/commands.json index af4d147213..8fd1a0af76 100644 --- a/commands.json +++ b/commands.json @@ -373,6 +373,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER SLAVES": { + "summary": "List slave nodes of the specified master node", + "complexity": "O(1)", + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SLOTS": { "summary": "Get array of Cluster slot to node mappings", "complexity": "O(N) where N is the total number of Cluster nodes", diff --git a/commands/cluster slaves.md b/commands/cluster slaves.md new file mode 100644 index 0000000000..bec20fa29c --- /dev/null +++ b/commands/cluster slaves.md @@ -0,0 +1,15 @@ +The command provides a list of slave nodes replicating from the specified +master node. The list is provided in the same format used by `CLUSTER NODES` (please refer to its documentation for the specification of the format). + +The command will fail if the specified node is not known or if it is not +a master according to the node table of the node receiving the command. + +Note that if a slave is added, moved, or removed from a given master node, +and we ask `CLUSTER SLAVES` to a node that has not yet received the +configuration update, it may show stale information. However eventually +(in a matter of seconds if there are no network partitions) all the nodes +will agree about the set of nodes associated with a given master. 
+ +@return + +The command returns data in the same format as `CLUSTER NODES`. From d5b9dd55e1f890fb77aef269819ae6dfdb736ec8 Mon Sep 17 00:00:00 2001 From: compwron Date: Wed, 18 Mar 2015 09:02:19 -0700 Subject: [PATCH 0211/2314] Fixing typo in persistence doc --- topics/persistence.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/persistence.md b/topics/persistence.md index b3965c0e47..81c73a5289 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -275,7 +275,7 @@ add it in the authorized_keys file of your small VPS. You are ready to transfer backups in an automated fashion. Get at least two VPS in two different providers for best results. -It is important to understand that this systems can easily fail if not coded +It is important to understand that this system can easily fail if not coded in the right way. At least make absolutely sure that after the transfer is completed you are able to verify the file size (that should match the one of the file you copied) and possibly the SHA1 digest if you are using a VPS. From e014922ee60f553ff8c018f7e3210c8680590d51 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 18 Mar 2015 17:42:54 +0100 Subject: [PATCH 0212/2314] CLUSTER COUNT-FAILURE-REPORTS command documented. 
--- commands.json | 12 ++++++++++++ commands/cluster count-failure-reports.md | 22 ++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 commands/cluster count-failure-reports.md diff --git a/commands.json b/commands.json index 8fd1a0af76..74985c34fc 100644 --- a/commands.json +++ b/commands.json @@ -240,6 +240,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER COUNT-FAILURE-REPORTS": { + "summary": "Return the number of failure reports active for a given node", + "complexity": "O(N) where N is the number of failure reports", + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER COUNTKEYSINSLOT": { "summary": "Return the number of local keys in the specified hash slot", "complexity": "O(1)", diff --git a/commands/cluster count-failure-reports.md b/commands/cluster count-failure-reports.md new file mode 100644 index 0000000000..e6475fe6bc --- /dev/null +++ b/commands/cluster count-failure-reports.md @@ -0,0 +1,22 @@ +The command returns the number of *failure reports* for the specified node. +Failure reports are the way Redis Cluster uses in order to promote a +`PFAIL` state, that means a node is not reachable, to a `FAIL` state, +that means that the majority of masters in the cluster agreed within +a window of time that the node is not reachable. + +A few more details: + +* A node flags another node with `PFAIL` when the node is not reachable for a time greater than the configured *node timeout*, which is a fundamental configuration parameter of a Redis Cluster. +* Nodes in `PFAIL` state are provided in gossip sections of heartbeat packets. +* Every time a node processes gossip packets from other nodes, it creates (and refreshes the TTL if needed) **failure reports**, remembering that a given node said another given node is in `PFAIL` condition. +* Each failure report has a time to live of two times the *node timeout* time. 
+* If at a given time a node has another node flagged with `PFAIL`, and at the same time collected the majority of other master nodes *failre reports* about this node (including itself if it is a master), then it elevates the failure state of the node from `PFAIL` to `FAIL`, and broadcasts a message forcing all the nodes that can be reached to flag the node as `FAIL`. + +This command returns the number of failure reports for the current node which are currently not expired (so received within two times the *node timeout* time). The count does not include what the node we are asking this count belives about the node ID we pass as argument, the count *only* includes the failure reports the node received from other nodes. + +This command is mainly useful for debugging, when the failure detector of +Redis Cluster is not operating as we believe it should. + +@return + +@integer-reply: the number of active failure reports for the node. From 0558e9d34af7b004b44c265404a06824af8a3693 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 19 Mar 2015 15:28:08 +0100 Subject: [PATCH 0213/2314] CLUSTER SET-CONFIG-EPOCH documented. 
--- commands.json | 12 ++++++++++++ commands/cluster set-config-epoch.md | 25 +++++++++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 commands/cluster set-config-epoch.md diff --git a/commands.json b/commands.json index 74985c34fc..9ec536fa0d 100644 --- a/commands.json +++ b/commands.json @@ -363,6 +363,18 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER SET-CONFIG-EPOCH": { + "summary": "Set the configuration epoch in a new node", + "complexity": "O(1)", + "arguments": [ + { + "name": "config-epoch", + "type": "integer" + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SETSLOT": { "summary": "Bind an hash slot to a specific node", "complexity": "O(1)", diff --git a/commands/cluster set-config-epoch.md b/commands/cluster set-config-epoch.md new file mode 100644 index 0000000000..560bf778c8 --- /dev/null +++ b/commands/cluster set-config-epoch.md @@ -0,0 +1,25 @@ +This command sets a specific *config epoch* in a fresh node. It only works when: + +1. The nodes table of the node is empty. +2. The node current *config epoch* is zero. + +These prerequisites are needed since usually, manually altering the +configuration epoch of a node is unsafe, we want to be sure that the node with +the higher configuration epoch value (that is the last that failed over), wins +over other nodes in claiming the hash slots ownership. + +However there is an exception to this rule, and it is when a new +cluster is created from scratch. Redis Cluster *config epoch collision +resolution* algorithm can deal with new nodes all configured with the +same configuration at startup, but this process is slow and should be +the exception, only to make sure that whatever happens, two more more +nodes eventually always move away from the state of having the same +configuration epoch. + +So, using `CONFIG SET-CONFIG-EPOCH`, when a new cluster is crated, we can +assign a different progressive configuration epoch to each node before +joining the cluster together. 
+ +@return + +@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. From 1fcf35d78564312401c2705cbd64296b9cbdb371 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 19 Mar 2015 15:44:11 +0100 Subject: [PATCH 0214/2314] CLUSTER RESET documented. --- commands.json | 13 +++++++++++++ commands/cluster reset.md | 23 +++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 commands/cluster reset.md diff --git a/commands.json b/commands.json index 9ec536fa0d..9d9114432d 100644 --- a/commands.json +++ b/commands.json @@ -357,6 +357,19 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER RESET": { + "summary": "Reset a Redis Cluster node", + "complexity": "O(N) where N is the number of known nodes. The command may execute a FLUSHALL as a side effect.", + "arguments": [ + { + "name": "reset-type", + "type": "enum", + "enum": ["HARD", "SOFT"] + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER SAVECONFIG": { "summary": "Forces the node to save cluster state on disk", "complexity": "O(1)", diff --git a/commands/cluster reset.md b/commands/cluster reset.md new file mode 100644 index 0000000000..2b4f0bd425 --- /dev/null +++ b/commands/cluster reset.md @@ -0,0 +1,23 @@ +Reset a Redis Cluster node, in a less or more drastic way depending on the +reset type, that can be **hard** or **soft**. Note that this command +**does not work for masters if they hold one or more keys**, in that case +to completely reset a master node there is to call `FLUSHALL` first, and then +`CLUSTER RESET`. + +Effects on the node: + +1. All the other nodes in the cluster are forget. +2. All the assigned / open slots are reset, so the hash slos to nodes map is totally cleared. +3. If the node is a slave, is turned into an (empty) master. Its dataset is flushed, so at the end the node will be an empty master. +4. **Hard reset only**: a new Node ID is generated. +5. 
**Hard reset only**: `currentEpoch` and `configEpoch` vars are set to 0. +6. The new configuration is presisted on disk in the node cluster configuration file. + +This command is mainly useful in order to re-provision a Redis Cluster node +in order to be used in the context of a new, different cluster. The command +is also extensively used by the Redis Cluster testing framework in order to +reset the state of the cluster every time a new test unit is executed. + +@return + +@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. From 3514d397265f96a64bb9f83ca7d3a1a7e7a422ba Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 19 Mar 2015 15:46:31 +0100 Subject: [PATCH 0215/2314] Reset type is optional in CLUSTER RESET. --- commands.json | 3 ++- commands/cluster reset.md | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 9d9114432d..672b467baa 100644 --- a/commands.json +++ b/commands.json @@ -364,7 +364,8 @@ { "name": "reset-type", "type": "enum", - "enum": ["HARD", "SOFT"] + "enum": ["HARD", "SOFT"], + "optional": true } ], "since": "3.0.0", diff --git a/commands/cluster reset.md b/commands/cluster reset.md index 2b4f0bd425..f45819cfb0 100644 --- a/commands/cluster reset.md +++ b/commands/cluster reset.md @@ -18,6 +18,8 @@ in order to be used in the context of a new, different cluster. The command is also extensively used by the Redis Cluster testing framework in order to reset the state of the cluster every time a new test unit is executed. +If no reset type is specified, the default is **soft**. + @return @simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. 
From 5ac677429b430fe159f01d0a120c41b713458dbd Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 19 Mar 2015 23:03:39 +0100 Subject: [PATCH 0216/2314] Remove unnecessary comma --- commands/cluster set-config-epoch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster set-config-epoch.md b/commands/cluster set-config-epoch.md index 560bf778c8..0e1be3b724 100644 --- a/commands/cluster set-config-epoch.md +++ b/commands/cluster set-config-epoch.md @@ -5,7 +5,7 @@ This command sets a specific *config epoch* in a fresh node. It only works when: These prerequisites are needed since usually, manually altering the configuration epoch of a node is unsafe, we want to be sure that the node with -the higher configuration epoch value (that is the last that failed over), wins +the higher configuration epoch value (that is the last that failed over) wins over other nodes in claiming the hash slots ownership. However there is an exception to this rule, and it is when a new From d3149e09218ebd93c4cc3656bb1fa55b906f3c3a Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 19 Mar 2015 23:05:15 +0100 Subject: [PATCH 0217/2314] =?UTF-8?q?Typo:=20crated=20=E2=86=92=20created?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- commands/cluster set-config-epoch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster set-config-epoch.md b/commands/cluster set-config-epoch.md index 0e1be3b724..00c79d697b 100644 --- a/commands/cluster set-config-epoch.md +++ b/commands/cluster set-config-epoch.md @@ -16,7 +16,7 @@ the exception, only to make sure that whatever happens, two more more nodes eventually always move away from the state of having the same configuration epoch. 
-So, using `CONFIG SET-CONFIG-EPOCH`, when a new cluster is crated, we can +So, using `CONFIG SET-CONFIG-EPOCH`, when a new cluster is created, we can assign a different progressive configuration epoch to each node before joining the cluster together. From ab9834ae70e54bbb655ff5af17ed9f233344f03c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 20 Mar 2015 10:52:27 +0100 Subject: [PATCH 0218/2314] CLUSTER FAILOVER documented. --- commands.json | 14 ++++++++++++ commands/cluster failover.md | 41 ++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 commands/cluster failover.md diff --git a/commands.json b/commands.json index 672b467baa..6a88e1be09 100644 --- a/commands.json +++ b/commands.json @@ -277,6 +277,20 @@ "since": "3.0.0", "group": "cluster" }, + "CLUSTER FAILOVER": { + "summary": "Forces a slave to perform a manual failover of its master.", + "complexity": "O(1)", + "arguments": [ + { + "name": "options", + "type": "enum", + "enum": ["FORCE"], + "optional": true + } + ], + "since": "3.0.0", + "group": "cluster" + }, "CLUSTER FORGET": { "summary": "Remove a node from the nodes table", "complexity": "O(1)", diff --git a/commands/cluster failover.md b/commands/cluster failover.md new file mode 100644 index 0000000000..899ab1e9fe --- /dev/null +++ b/commands/cluster failover.md @@ -0,0 +1,41 @@ +This command, that can only be send to a Redis Cluster slave node, forces +the slave to start a manual failover of its master instance. + +A manual failover is a special kind of failover that is usually executed when +there are no actual failures, but we wish to swap the current master with one +of its slaves (which is the node we send the command to), in a safe way, +without any window for data loss. It works in the following way: + +1. The slave tells the master to stop porcessing queries from clients. +2. The master replies to the slave with the current *replication offset*. +3. 
The slave waits for the replication offset to match on its side, to make sure it processed all the data from the slave before to continue. +4. The slave starts a failover, obtains a new configuration epoch from the majority of the masters, and broadcast the new configuration. +5. The old master receives the configuration update: unblocks its clients and start replying with redirection messages so that they'll continue the chat with the new master. + +This way clients are moved away from the old master to the new master +atomically and only when the slave that is turning in the new master +processed all the replication stream from the old master. + +If the **FORCE** option is given, the slave does not perform any handshake +with the master, that may be not reachable, but instead just starts a +failover ASAP starting from point 4. This is useful when we want to start +a manual failover while the master is no longer reachable. + +Note that a manual failover is different than a normal failover triggered +by the Redis Cluster failure detection algorithm in a few ways: + +1. The data validity of the slave is not checked. Even if the slave has not recently updated data, it will failover anyway if we use `CLUSTER FAILOVER FORCE`. +2. There is no random delay before the failover starts. + +Note that currently a manual failover is not able to promote a slave into +master if it can't receive votes from the majority of masters in order to +create a new unique configuration epoch number. + +`CLUSTER FAILOVER` does not execute a failover synchronously, it only +*schedules* a manual failover, bypassing the failure detection stage, so to +check if the failover actually happened, `CLUSTER NODES` or other means +should be used to check the state change. + +@return + +@simple-string-reply: `OK` if the command was accepted and a manual failover is going to be attempted. An error if the operation cannot be executed, for example if we are talking with a node which is already a master. 
From aa7af7a059624def8159ea99b39f1ac8474cc334 Mon Sep 17 00:00:00 2001 From: Pascal Borreli Date: Fri, 20 Mar 2015 09:59:23 +0000 Subject: [PATCH 0219/2314] Fixed typos --- commands/cluster count-failure-reports.md | 4 ++-- commands/cluster failover.md | 2 +- commands/cluster info.md | 2 +- commands/cluster reset.md | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/commands/cluster count-failure-reports.md b/commands/cluster count-failure-reports.md index e6475fe6bc..ac1ef71c0e 100644 --- a/commands/cluster count-failure-reports.md +++ b/commands/cluster count-failure-reports.md @@ -10,9 +10,9 @@ A few more details: * Nodes in `PFAIL` state are provided in gossip sections of heartbeat packets. * Every time a node processes gossip packets from other nodes, it creates (and refreshes the TTL if needed) **failure reports**, remembering that a given node said another given node is in `PFAIL` condition. * Each failure report has a time to live of two times the *node timeout* time. -* If at a given time a node has another node flagged with `PFAIL`, and at the same time collected the majority of other master nodes *failre reports* about this node (including itself if it is a master), then it elevates the failure state of the node from `PFAIL` to `FAIL`, and broadcasts a message forcing all the nodes that can be reached to flag the node as `FAIL`. +* If at a given time a node has another node flagged with `PFAIL`, and at the same time collected the majority of other master nodes *failure reports* about this node (including itself if it is a master), then it elevates the failure state of the node from `PFAIL` to `FAIL`, and broadcasts a message forcing all the nodes that can be reached to flag the node as `FAIL`. -This command returns the number of failure reports for the current node which are currently not expired (so received within two times the *node timeout* time). 
The count does not include what the node we are asking this count belives about the node ID we pass as argument, the count *only* includes the failure reports the node received from other nodes. +This command returns the number of failure reports for the current node which are currently not expired (so received within two times the *node timeout* time). The count does not include what the node we are asking this count believes about the node ID we pass as argument, the count *only* includes the failure reports the node received from other nodes. This command is mainly useful for debugging, when the failure detector of Redis Cluster is not operating as we believe it should. diff --git a/commands/cluster failover.md b/commands/cluster failover.md index 899ab1e9fe..f6c767703d 100644 --- a/commands/cluster failover.md +++ b/commands/cluster failover.md @@ -6,7 +6,7 @@ there are no actual failures, but we wish to swap the current master with one of its slaves (which is the node we send the command to), in a safe way, without any window for data loss. It works in the following way: -1. The slave tells the master to stop porcessing queries from clients. +1. The slave tells the master to stop processing queries from clients. 2. The master replies to the slave with the current *replication offset*. 3. The slave waits for the replication offset to match on its side, to make sure it processed all the data from the slave before to continue. 4. The slave starts a failover, obtains a new configuration epoch from the majority of the masters, and broadcast the new configuration. diff --git a/commands/cluster info.md b/commands/cluster info.md index 51ccfbf94f..f8138486f9 100644 --- a/commands/cluster info.md +++ b/commands/cluster info.md @@ -32,4 +32,4 @@ More information about the Current Epoch and Config Epoch variables are availabl @return -@bulk-string-reply: A map between named fields and values in the form of `:` lines separated by newlines compoesd by the two bytes `CRLF`. 
+@bulk-string-reply: A map between named fields and values in the form of `:` lines separated by newlines composed by the two bytes `CRLF`. diff --git a/commands/cluster reset.md b/commands/cluster reset.md index f45819cfb0..1873fc6d28 100644 --- a/commands/cluster reset.md +++ b/commands/cluster reset.md @@ -7,11 +7,11 @@ to completely reset a master node there is to call `FLUSHALL` first, and then Effects on the node: 1. All the other nodes in the cluster are forget. -2. All the assigned / open slots are reset, so the hash slos to nodes map is totally cleared. +2. All the assigned / open slots are reset, so the hash slots to nodes map is totally cleared. 3. If the node is a slave, is turned into an (empty) master. Its dataset is flushed, so at the end the node will be an empty master. 4. **Hard reset only**: a new Node ID is generated. 5. **Hard reset only**: `currentEpoch` and `configEpoch` vars are set to 0. -6. The new configuration is presisted on disk in the node cluster configuration file. +6. The new configuration is persisted on disk in the node cluster configuration file. This command is mainly useful in order to re-provision a Redis Cluster node in order to be used in the context of a new, different cluster. The command From 8ab8cf9c9d697ce2ea14a22c7fffbfbc3e1d7353 Mon Sep 17 00:00:00 2001 From: antirez Date: Sat, 21 Mar 2015 12:11:42 +0100 Subject: [PATCH 0220/2314] Added a note about config epochs in CLUSTER NODES. --- commands/cluster nodes.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 11bb0d1a01..9d318bea25 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -54,6 +54,16 @@ Meaning of the flags (field number 3): * **noaddr** No address known for this node. * **noflags** No flags at all. 
+## Notes on published config epochs + +Slaves broadcast their master's config epochs (in order to get an `UPDATE` +message if they are found to be stale), so the real config epoch of the +slave (which is meaningless more or less, since they don't serve hash slots) +can be only obtained checking the node flagged as `myself`, which is the entry +of the node we are asking to generate `CLUSTER NODES` output. The other slaves +epochs reflect what they publish in heartbeat packets, which is, the +configuration epoch of the masters they are currently replicating. + ## Special slot entries Normally hash slots associated to a given node are in one of the following formats, From ac98bc20ea7afa5b41d7b673cdea4c34635128a9 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Sat, 21 Mar 2015 18:29:34 -0300 Subject: [PATCH 0221/2314] Fix spell checker warning. --- Rakefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Rakefile b/Rakefile index 1c00b9962b..b6f2dc7655 100644 --- a/Rakefile +++ b/Rakefile @@ -20,7 +20,12 @@ task :spellcheck do `mkdir -p tmp` IO.popen("aspell --lang=en create master ./tmp/dict", "w") do |io| - io.puts(JSON.parse(File.read("commands.json")).keys.map(&:split).flatten.join("\n")) + words = JSON.parse(File.read("commands.json")). + keys. + map { |str| str.split(/[ -]/) }. + flatten(1) + + io.puts(words.join("\n")) io.puts(File.read("wordlist")) end From f257415df3ff7a8761315ce55847dee5886d491d Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Sat, 21 Mar 2015 18:30:13 -0300 Subject: [PATCH 0222/2314] Fix typo in CLUSTER SET-CONFIG-EPOCH. 
--- commands/cluster set-config-epoch.md | 2 +- wordlist | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/commands/cluster set-config-epoch.md b/commands/cluster set-config-epoch.md index 00c79d697b..4c12458457 100644 --- a/commands/cluster set-config-epoch.md +++ b/commands/cluster set-config-epoch.md @@ -16,7 +16,7 @@ the exception, only to make sure that whatever happens, two more more nodes eventually always move away from the state of having the same configuration epoch. -So, using `CONFIG SET-CONFIG-EPOCH`, when a new cluster is created, we can +So, using `CONFIG SET-CONFIG-EPOCH`, when a new cluster is created, we can assign a different progressive configuration epoch to each node before joining the cluster together. diff --git a/wordlist b/wordlist index 36b0d414a5..7ba32f391c 100644 --- a/wordlist +++ b/wordlist @@ -27,6 +27,7 @@ boolean btw cardinality checksum +config dataset datasets decrement @@ -50,6 +51,7 @@ pipelining scalable semantical snapshotting +startup subcommand subcommands substring @@ -61,4 +63,4 @@ unordered unsubscribe unsubscribed unsubscribes -unwatch/ES +unwatch From d476a5e5719cdf95c0f388b97b9769e66e76ee04 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2015 17:12:14 +0100 Subject: [PATCH 0223/2314] Document the TAKEOVER option of CLUSTER FAILOVER. 
--- commands.json | 2 +- commands/cluster failover.md | 45 +++++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 12 deletions(-) diff --git a/commands.json b/commands.json index 6a88e1be09..78efa036c2 100644 --- a/commands.json +++ b/commands.json @@ -284,7 +284,7 @@ { "name": "options", "type": "enum", - "enum": ["FORCE"], + "enum": ["FORCE","TAKEOVER"], "optional": true } ], diff --git a/commands/cluster failover.md b/commands/cluster failover.md index 899ab1e9fe..f238a79502 100644 --- a/commands/cluster failover.md +++ b/commands/cluster failover.md @@ -16,25 +16,48 @@ This way clients are moved away from the old master to the new master atomically and only when the slave that is turning in the new master processed all the replication stream from the old master. +## FORCE option: manual failover when the master is down + +The command behavior can be modified by two options: **FORCE** and **TAKEOVER**. + If the **FORCE** option is given, the slave does not perform any handshake with the master, that may be not reachable, but instead just starts a failover ASAP starting from point 4. This is useful when we want to start a manual failover while the master is no longer reachable. -Note that a manual failover is different than a normal failover triggered -by the Redis Cluster failure detection algorithm in a few ways: +However using **FORCE** we still need the majority of masters to be available +in order to authorize the failover and genereate a new configuration epoch +for the slave that is going to become master. + +## TAKEOVER option: manual failover without cluster consensus + +There are situations where this is not enough, and we want a slave to failover +without any agreement with the rest of the cluster. A real world use case +for this is to mass promote slaves in a different data center to masters +in order to perform a data center switch, while all the masters are down +or partitioned away. 
+ +For **TAKEOVER** option implies everything **FORCE** implies, but also does +not uses any cluster authorization in order to failover. A slave receiving +`CLUSTER FAILOVER TAKEOVER` will instead: + +1. Generate a new `configEpoch` unilaterally, just taking the current greatest epoch available and incrementing it if its local configuration epoch is not already the greatest. +2. Assign itself all the hash slots of its master, and propagate the new configuraiton to every node which is reachable ASAP, and eventually to every other node. + +Note that **TAKEOVER violates the last-failover-wins principle** of Redis Cluster, since the configuration epoch generated by the slave violates the normal generation of configuration epochs in several ways: + +1. There is no guarantee that it is actually the higher configuration epoch, since, for example, we can use the **TAKEOVER** option within a minority, nor any message exchange is performed to generate the new configuration epoch. +2. If we generate a configuration epoch which happens to collide with another instance, eventually our configuration epoch, or the one of another instance with our same epoch, will be moved away using the *configuration epoch collision resolution algorithm*. -1. The data validity of the slave is not checked. Even if the slave has not recently updated data, it will failover anyway if we use `CLUSTER FAILOVER FORCE`. -2. There is no random delay before the failover starts. +Because of this the **TAKEOVER** option should be used with care. -Note that currently a manual failover is not able to promote a slave into -master if it can't receive votes from the majority of masters in order to -create a new unique configuration epoch number. 
+## Implementation details and notes -`CLUSTER FAILOVER` does not execute a failover synchronously, it only -*schedules* a manual failover, bypassing the failure detection stage, so to -check if the failover actually happened, `CLUSTER NODES` or other means -should be used to check the state change. +`CLUSTER FAILOVER`, unless the **TAKEOVER** option is specified, does not +execute a failover synchronously, it only *schedules* a manual failover, +bypassing the failure detection stage, so to check if the failover actually +happened, `CLUSTER NODES` or other means should be used in order to verify +that the state of the cluster changes after some time the command was sent. @return From 13f1b7c3d5f651f27d9fcb837418afda713728c0 Mon Sep 17 00:00:00 2001 From: Juarez Bochi Date: Mon, 23 Mar 2015 16:30:47 -0300 Subject: [PATCH 0224/2314] Fix parameter name in CONFIG SET --- commands/config set.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/config set.md b/commands/config set.md index dd6a936e8e..85969a7968 100644 --- a/commands/config set.md +++ b/commands/config set.md @@ -34,7 +34,7 @@ save 300 10 that means, save after 900 seconds if there is at least 1 change to the dataset, and after 300 seconds if there are at least 10 changes to the datasets, should -be set using `CONFIG SET` as "900 1 300 10". +be set using `CONFIG SET SAVE "900 1 300 10"`. It is possible to switch persistence from RDB snapshotting to append-only file (and the other way around) using the `CONFIG SET` command. 
From 31c4157627d53c699a32dcbd868430674cd7f5bf Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 19 Mar 2015 23:32:38 +0100 Subject: [PATCH 0225/2314] Some wording changed in CLUSTER RESET --- commands/cluster reset.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/commands/cluster reset.md b/commands/cluster reset.md index 1873fc6d28..5eb4e0fac7 100644 --- a/commands/cluster reset.md +++ b/commands/cluster reset.md @@ -1,19 +1,19 @@ -Reset a Redis Cluster node, in a less or more drastic way depending on the +Reset a Redis Cluster node, in a more or less drastic way depending on the reset type, that can be **hard** or **soft**. Note that this command **does not work for masters if they hold one or more keys**, in that case -to completely reset a master node there is to call `FLUSHALL` first, and then -`CLUSTER RESET`. +to completely reset a master node keys must be removed first, e.g. by using `FLUSHALL` first, +and then `CLUSTER RESET`. Effects on the node: -1. All the other nodes in the cluster are forget. -2. All the assigned / open slots are reset, so the hash slots to nodes map is totally cleared. -3. If the node is a slave, is turned into an (empty) master. Its dataset is flushed, so at the end the node will be an empty master. +1. All the other nodes in the cluster are forgotten. +2. All the assigned / open slots are reset, so the slots-to-nodes mapping is totally cleared. +3. If the node is a slave it is turned into an (empty) master. Its dataset is flushed, so at the end the node will be an empty master. 4. **Hard reset only**: a new Node ID is generated. 5. **Hard reset only**: `currentEpoch` and `configEpoch` vars are set to 0. 6. The new configuration is persisted on disk in the node cluster configuration file. 
-This command is mainly useful in order to re-provision a Redis Cluster node +This command is mainly useful to re-provision a Redis Cluster node in order to be used in the context of a new, different cluster. The command is also extensively used by the Redis Cluster testing framework in order to reset the state of the cluster every time a new test unit is executed. From 17f6661970c6ebf2cd6c55e4733eca34d9ec96af Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 23 Mar 2015 21:30:17 +0100 Subject: [PATCH 0226/2314] Correct link to DECR command Closes #357 --- topics/data-types-intro.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index b4a1e0473b..67de359aba 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -109,7 +109,7 @@ you can perform with them. For instance, one is atomic increment: The [INCR](/commands/incr) command parses the string value as an integer, increments it by one, and finally sets the obtained value as the new value. There are other similar commands like [INCRBY](/commands/incrby), -[DECR](commands/decr) and [DECRBY](/commands/decrby). Internally it's +[DECR](/commands/decr) and [DECRBY](/commands/decrby). Internally it's always the same command, acting in a slightly different way. What does it mean that INCR is atomic? @@ -492,7 +492,7 @@ Example of rule 2: > lpop mylist "1" > exists mylist - (integer) 0 + (integer) 0 The key no longer exists after all the elements are popped. From 7b204dba518766200089d62307558d0ef06b2ad9 Mon Sep 17 00:00:00 2001 From: jerational Date: Tue, 24 Mar 2015 23:38:20 -0700 Subject: [PATCH 0227/2314] Update distlock.md --- topics/distlock.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/distlock.md b/topics/distlock.md index c60456d3a4..5e6cbeff87 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -20,7 +20,7 @@ complex or alternative designs. 
Implementations --- -Before to describe the algorithm, here there are a few links at implementations +Before describing the algorithm, here there are a few links at implementations already available, that can be used as a reference. * [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). @@ -64,7 +64,7 @@ If this is the case, you can use your replication based solution. Otherwise we s Correct implementation with a single instance --- -Before to try to overcome the limitation of the single instance setup described above, let’s check how to do it correctly in this simple case, since this is actually a viable solution in applications where a race condition from time to time is acceptable, and because locking into a single instance is the foundation we’ll use for the distributed algorithm described here. +Before trying to overcome the limitation of the single instance setup described above, let’s check how to do it correctly in this simple case, since this is actually a viable solution in applications where a race condition from time to time is acceptable, and because locking into a single instance is the foundation we’ll use for the distributed algorithm described here. To acquire the lock, the way to go is the following: From c15c429e80d010217ce39fe429a2ddf4b9d820ec Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 25 Mar 2015 14:12:25 +0100 Subject: [PATCH 0228/2314] Added Redlock-rb fork link. --- topics/distlock.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/distlock.md b/topics/distlock.md index c60456d3a4..c131f29b62 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -23,7 +23,7 @@ Implementations Before to describe the algorithm, here there are a few links at implementations already available, that can be used as a reference. -* [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). 
+* [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution and perhaps more. * [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). From 5f57a67fe0ecc7d09a5cf17748253c07f13ceacf Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 26 Mar 2015 10:23:16 +0100 Subject: [PATCH 0229/2314] Cluster specification update. --- topics/cluster-spec.md | 603 ++++++++++++++++++++++++++--------------- 1 file changed, 381 insertions(+), 222 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 64c7133ea4..b84a0f8beb 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -1,4 +1,9 @@ -Redis cluster Specification (work in progress) +Welcome to the **Redis Cluster Specification**. Here you'll find information +about algorithms and design rationales of Redis Cluster. This document is a work +in progress as it is continuously synchronized with the actual implementation +of Redis. + +Overview and rationale of the design === Redis Cluster goals === Redis Cluster is a distributed implementation of Redis with the following goals, * High performance and linear scalability up to 1000 nodes. There are no proxies, asynchronous replication is used, and no merge operations are performed on values. * Acceptable degree of write safety: the system tries (in a best-effort way) to retain all the writes originating from clients connected with the majority of the master nodes. Usually there are small windows where acknowledged writes can be lost. Windows to lose acknowledged writes are larger when clients are in a minority partition. 
-* Availability: Redis Cluster is able to survive to partitions where the majority of the master nodes are reachable and there is at least a reachable slave for every master node that is no longer reachable. +* Availability: Redis Cluster is able to survive to partitions where the majority of the master nodes are reachable and there is at least a reachable slave for every master node that is no longer reachable. Moreover using *replicas migration*, masters no longer replicated by any slave, will receive one from a master which is covered by multiple slaves. -What is described in this document is implemented in the `unstable` branch of the GitHub Redis repository. Redis Cluster has now entered the beta stage, so new betas are released every month and can be found in the [download page](http://redis.io/download) of the Redis web site. +What is described in this document is implemented in Redis 3.0 or greater. Implemented subset --- @@ -26,7 +31,7 @@ manual reshardings, multi-key operations may become unavailable for some time while single keys operations are always available. Redis Cluster does not support multiple databases like the stand alone version -of Redis, there is just database 0, and `SELECT` is not allowed. +of Redis, there is just database 0, and the `SELECT` command is not allowed. Clients and Servers roles in the Redis cluster protocol --- @@ -34,50 +39,56 @@ Clients and Servers roles in the Redis cluster protocol In Redis cluster nodes are responsible for holding the data, and taking the state of the cluster, including mapping keys to the right nodes. Cluster nodes are also able to auto-discover other nodes, detect non working -nodes, and performing slave nodes election to master when needed. +nodes, and performing slave nodes promotion to master when needed, in order +to continue to operate when a failure occurs. To perform their tasks all the cluster nodes are connected using a -TCP bus and a binary protocol (the **cluster bus**). 
+TCP bus and a binary protocol, called the **Redis Cluster Bus**. Every node is connected to every other node in the cluster using the cluster bus. Nodes use a gossip protocol to propagate information about the cluster in order to discover new nodes, to send ping packets to make sure all the other nodes are working properly, and to send cluster messages needed to signal specific conditions. The cluster bus is also used in order to -propagate Pub/Sub messages across the cluster. +propagate Pub/Sub messages across the cluster, and to orchestrate manual +failovers when requested by users (manual failovers are failovers which +are not initiated by the Redis Cluster failure detector, but by the +system administrator directly). -Since cluster nodes are not able to proxy requests clients may be redirected +Since cluster nodes are not able to proxy requests, clients may be redirected to other nodes using redirection errors `-MOVED` and `-ASK`. The client is in theory free to send requests to all the nodes in the cluster, -getting redirected if needed, so the client is not required to take the +getting redirected if needed, so the client is not required to hold the state of the cluster. However clients that are able to cache the map between keys and nodes can improve the performance in a sensible way. Write safety --- -Redis Cluster uses asynchronous replication between nodes, and last-elected-master-dataset-wins implicit merge function, so there are always windows when it is possible to lose writes during partitions. However these windows are very different in the case of a client that is connected to the majority of masters, and a client that is connected to the minority of masters. +Redis Cluster uses asynchronous replication between nodes, and **last failover wins** implicit merge function. It means that the last elected master dataset eventually replaces all the other replicas. 
This means that there are always windows when it is possible to lose writes during partitions. However these windows are very different in the case of a client that is connected to the majority of masters, and a client that is connected to the minority of masters. -Redis Cluster tries hard to retain all the writes that are performed by clients connected to the majority of masters, with two exceptions: +Redis Cluster tries harder to retain writes that are performed by clients connected to the majority of masters, compared to writes performed into the minority +side. The following are examples of scenarios that lead to lose of acknowledged +writes received in the majority partitions, during failures: -1) A write may reach a master, but while the master may be able to reply to the client, the write may not be propagated to slaves via the asynchronous replication used between master and slave nodes. If the master dies without the write reaching the slaves, the write is lost forever in case the master is unreachable for a long enough period that one of its slaves is promoted. +1. A write may reach a master, but while the master may be able to reply to the client, the write may not be propagated to slaves via the asynchronous replication used between master and slave nodes. If the master dies without the write reaching the slaves, the write is lost forever in case the master is unreachable for a long enough period that one of its slaves is promoted. This is usually hard to observe in case of total, sudden failure of a master node, since masters try to reply to clients (with the acknowledge of the write) and slaves (propagating the write) about at the same time. However it is a real world failure mode. -2) Another theoretically possible failure mode where writes are lost is the following: +2. Another theoretically possible failure mode where writes are lost is the following: * A master is unreachable because of a partition. * It gets failed over by one of its slaves. 
* After some time it may be reachable again. -* A client with a not updated routing table may write to it before the master is converted to a slave (of the new master) by the cluster. +* A client with a not updated routing table may write to the old master before it is converted into a slave (of the new master) by the cluster. -The second failure mode is unlikely to happen because master nodes not able to communicate with the majority of the other masters for enough time to be failed over, no longer accept writes, and when the partition is fixed writes are still refused for a small amount of time to allow other nodes to inform about configuration changes. +The second failure mode is unlikely to happen because master nodes not able to communicate with the majority of the other masters for enough time to be failed over, no longer accept writes, and when the partition is fixed writes are still refused for a small amount of time to allow other nodes to inform about configuration changes. This failure mode also requires that the client has a yet not updated table. -Redis Cluster loses a non trivial amount of writes on partitions where there is a minority of masters and at least one or more clients, since all the writes sent to the masters may potentially get lost if the masters are failed over in the majority side. +Writes targeting the minority side of a partition has fairly larger windows to get lost. For example Redis Cluster loses a non trivial amount of writes on partitions where there is a minority of masters and at least one or more clients, since all the writes sent to the masters may potentially get lost if the masters are failed over in the majority side. -Specifically, for a master to be failed over, it must be not reachable by the majority of masters for at least `NODE_TIMEOUT`, so if the partition is fixed before that time, no write is lost. 
When the partition lasts for more than `NODE_TIMEOUT`, the minority side of the cluster will start refusing writes as soon as `NODE_TIMEOUT` time has elapsed, so there is a maximum window after which the minority becomes no longer available, hence no write is accepted and lost after that time. +Specifically, for a master to be failed over, it must be not reachable by the majority of masters for at least `NODE_TIMEOUT`, so if the partition is fixed before that time, no write is lost. When the partition lasts for more than `NODE_TIMEOUT`, all the writes performed in the minority side up to that point may be lost. However the minority side of a Redis Cluster will start refusing writes as soon as `NODE_TIMEOUT` time has elapsed without contact with the majority, so there is a maximum window after which the minority becomes no longer available, hence no write is accepted and lost after that time. Availability --- -Redis Cluster is not available in the minority side of the partition. In the majority side of the partition assuming that there are at least the majority of masters and a slave for every unreachable master, the cluster returns available after `NODE_TIMEOUT` time, plus a few more seconds required for a slave to get elected and failover its master. +Redis Cluster is not available in the minority side of the partition. In the majority side of the partition assuming that there are at least the majority of masters and a slave for every unreachable master, the cluster returns available after `NODE_TIMEOUT` time, plus a few more seconds required for a slave to get elected and failover its master (failovers usually are executed in a matter of 1 or 2 seconds). This means that Redis Cluster is designed to survive to failures of a few nodes in the cluster, but is not a suitable solution for applications that require availability in the event of large net splits. 
@@ -88,6 +99,8 @@ For example in a cluster with 5 nodes and a single slave per node, there is a `1 Thanks to a Redis Cluster feature called **replicas migration** the Cluster availability is improved in many real world scenarios by the fact that replicas migrate to orphaned masters (masters no longer having replicas). +So at every successful failure event, the cluster may reconfigure the slaves +layout in order to resist better to the next failure. Performance --- @@ -96,11 +109,11 @@ In Redis Cluster nodes don't proxy commands to the right node in charge for a gi Eventually clients obtain an up to date representation of the cluster and which node serves which subset of keys, so during normal operations clients directly contact the right nodes in order to send a given command. -Because of the use of asynchronous replication, nodes does not wait for other nodes acknowledgment of writes (optional synchronous replication is a work in progress and will be likely added in future releases). +Because of the use of asynchronous replication, nodes does not wait for other nodes acknowledgment of writes (if not explicitly requested using the `WAIT` command). Also, because multiple keys commands are only limited to *near* keys, data is never moved between nodes if not in case of resharding. -So normal operations are handled exactly as in the case of a single Redis instance. This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design allows to scale linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single stand alone Redis node case. +So normal operations are handled exactly as in the case of a single Redis instance. 
This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design allows to scale linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single standalone Redis node case. Very high performances and scalability while preserving weak but reasonable forms of data safety and availability is the main goal of @@ -111,19 +124,29 @@ Why merge operations are avoided Redis Cluster design avoids conflicting versions of the same key-value pair in multiple nodes since in the case of the Redis data model this is not always desirable: values in Redis are often very large, it is common to see lists or sorted sets with millions of elements. Also data types are semantically complex. Transferring and merging these kind of values can be a major bottleneck and/or may require a non trivial involvement of application-side logic, additional memory to store meta-data, and so forth. +There are no strict technological limits here, CRDTs or synchronously replicated +state machines can model complex data types similar to Redis, however the +actual run time behavior of such systems would not be similar to Redis Cluster. +Redis Cluster was designed in order to cover the exact use cases of the +non clustered Redis version. + +Overview of Redis Cluster main components +=== + Keys distribution model --- The key space is split into 16384 slots, effectively setting an upper limit -for the cluster size of 16384 nodes (however the suggested max size of +for the cluster size of 16384 master nodes (however the suggested max size of nodes is in the order of ~ 1000 nodes). -All the master nodes will handle a percentage of the 16384 hash slots. +Each master nodes in a cluster handles a subset of the 16384 hash slots. 
When the cluster is **stable**, that means that there is no a cluster reconfiguration in progress (where hash slots are moved from one node to another) a single hash slot will be served exactly by a single node (however the serving node can have one or more slaves that will replace -it in the case of net splits or failures). +it in the case of net splits or failures, and that can be used in order +to scale read operations where reading stale data is acceptable). The base algorithm used to map keys to hash slots is the following (read the next paragraph for the hash tag exception to this rule): @@ -153,21 +176,22 @@ Keys hash tags --- There is an exception for the computation of the hash slot that is used in order -to implement **hash tags**. Hash tags are a way to ensure that two keys +to implement **hash tags**. Hash tags are a way to ensure that multiple keys are allocated in the same hash slot. This is used in order to implement multi-key operations in Redis Cluster. -In order to implement hash tags, the hash slot is computed in a different -way. Basically if the key contains a "{...}" pattern only the substring between +In order to implement hash tags, the hash slot for a key is computed in a +slightly different way in certain conditions. +Basically if the key contains a "{...}" pattern only the substring between `{` and `}` is hashed in order to obtain the hash slot. However since it is possible that there are multiple occurrences of `{` or `}` the algorithm is well specified by the following rules: -* If the key contains a `{` character. -* There is a `}` character on the right of `{` -* There are one or more characters between the first occurrence of `{` and the first occurrence of `}` after the first occurrence of `{`. +* IF the key contains a `{` character. +* AND IF there is a `}` character on the right of `{` +* AND IF there are one or more characters between the first occurrence of `{` and the first occurrence of `}`. 
-Then instead of hashing the key, only what is between the first occurrence of `{` and the first occurrence of `}` on its right are hashed. +Then instead of hashing the key, only what is between the first occurrence of `{` and the following first occurrence of `}` is hashed. Examples: @@ -224,38 +248,43 @@ hex representation of a 160 bit random number, obtained the first time a node is started (usually using /dev/urandom). The node will save its ID in the node configuration file, and will use the same ID forever, or at least as long as the node configuration file is not -deleted by the system administrator. +deleted by the system administrator, or an *hard reset* is requested +via the `CLUSTER RESET` command. The node ID is used to identify every node across the whole cluster. It is possible for a given node to change IP and address without any need to also change the node ID. The cluster is also able to detect the change -in IP/port and reconfigure broadcast the information using the gossip -protocol running over the cluster bus. +in IP/port and reconfigure using the gossip protocol running over the cluster +bus. + +The node ID is not the only information associated with each node, but is +the only one that is always globally consistent. Every node has also the +following set of information associated. Some information is about the +cluster configuration detail of this specific node, and is eventually +consistent across the cluster. Some other information, like the last time +a node was pinged, is instead local to each node. 
-Every node has other associated information that all the other nodes -know: +This is a list of information each node has associated in each other node +that knows it: The node ID, IP and port of the node, a set of flags, what is +the master of the node if it is flagged as `slave`, last time the node +was pinged and the last time the pong was received, the current *configuration +epoch* of the node (explained later in this specification), the link state +and finally the set of hash slots served. -* The IP address and TCP port where the node is located. -* A set of flags. -* A set of hash slots served by the node. -* Last time we sent a ping packet using the cluster bus. -* Last time we received a pong packet in reply. -* The time at which we flagged the node as failing. -* The number of slaves of this node. -* The master node ID, if this node is a slave (or 0000000... if it is a master). +A detailed [explanation of all the node fields](http://redis.io/commands/cluster-nodes) is described in the `CLUSTER NODES` documentation. -Some of this information is available using the `CLUSTER NODES` command that -can be sent to all the nodes in the cluster, both master and slave nodes. +The `CLUSTER NODES` command, that can be sent to each the nodes in the cluster, provides as output the state of the cluster and the informations for each node +according to the local view the queries node has of the cluster. The following is an example of output of `CLUSTER NODES` sent to a master node in a small cluster of three nodes. 
$ redis-cli cluster nodes - d1861060fe6a534d42d8a19aeb36600e18785e04 127.0.0.1:6379 myself - 0 1318428930 connected 0-1364 - 3886e65cc906bfd9b1f7e7bde468726a052d1dae 127.0.0.1:6380 master - 1318428930 1318428931 connected 1365-2729 - d289c575dcbc4bdd2931585fd4339089e461a27d 127.0.0.1:6381 master - 1318428931 1318428931 connected 2730-4095 + d1861060fe6a534d42d8a19aeb36600e18785e04 127.0.0.1:6379 myself - 0 1318428930 1 connected 0-1364 + 3886e65cc906bfd9b1f7e7bde468726a052d1dae 127.0.0.1:6380 master - 1318428930 1318428931 2 connected 1365-2729 + d289c575dcbc4bdd2931585fd4339089e461a27d 127.0.0.1:6381 master - 1318428931 1318428931 3 connected 2730-4095 -In the above listing the different fields are in order: node id, address:port, flags, last ping sent, last pong received, link state, slots. +In the above listing the different fields are in order: node id, address:port, flags, last ping sent, last pong received, configuration epoch, link state, slots. Details about the above fields will be covered as soon as we talk of specific parts of Redis Cluster. The Cluster bus --- @@ -287,17 +316,18 @@ These TCP connections are kept alive all the time and are not created on demand. When a node expects a pong reply in response to a ping in the cluster bus, before to wait for enough time to mark the node as unreachable, it will try to refresh the connection with the node by reconnecting from scratch. -While Redis Cluster nodes form a full mesh, nodes use a gossip protocol and +While Redis Cluster nodes form a full mesh, **nodes use a gossip protocol and a configuration update mechanism in order to avoid exchanging too many -packets between nodes during normal conditions. +messages between nodes during normal conditions**, so the number of message +exchanged is not exponential. Nodes handshake --- Nodes always accept connection in the cluster bus port, and even reply to pings when received, even if the pinging node is not trusted. 
-However all the other packets will be discarded by the node if the node -is not considered part of the cluster. +However all the other packets will be discarded by the receiving node if the +sending node is not considered part of the cluster. A node will accept another node as part of the cluster only in two ways: @@ -313,84 +343,106 @@ This means that as long as we join nodes in any connected graph, they'll eventua This mechanism makes the cluster more robust but prevents that different Redis clusters will accidentally mix after change of IP addresses or other network related events. -All the nodes actively try to connect to all the other known nodes if the link is down. +Redirection and resharding +=== MOVED Redirection --- A Redis client is free to send queries to every node in the cluster, including slave nodes. The node will analyze the query, and if it is acceptable -(that is, only a single key is mentioned in the query) it will see what -node is responsible for the hash slot where the key belongs. +(that is, only a single key is mentioned in the query, or the multiple keys +mentioned are all to the same hash slot) it will lookup what +node is responsible for the hash slot where the key or keys belong. If the hash slot is served by the node, the query is simply processed, otherwise -the node will check its internal hash slot -> node ID map and will reply -to the client with a MOVED error. - -A MOVED error is like the following: +the node will check its internal hash slot to node map, and will reply +to the client with a MOVED error, like in the following example: GET x -MOVED 3999 127.0.0.1:6381 The error includes the hash slot of the key (3999) and the ip:port of the instance that can serve the query. The client need to reissue the query -to the specified ip address and port. 
Note that even if the client waits -a long time before reissuing the query, and in the meantime the cluster -configuration changed, the destination node will reply again with a MOVED -error if the hash slot 3999 is now served by another node. +to the specified node, specified by IP address and port. +Note that even if the client waits a long time before reissuing the query, +and in the meantime the cluster configuration changed, the destination node +will reply again with a MOVED error if the hash slot 3999 is now served by +another node. The same happens if the contacted node had no updated information. So while from the point of view of the cluster nodes are identified by IDs we try to simply our interface with the client just exposing a map -between hash slots and Redis nodes identified by ip:port pairs. +between hash slots and Redis nodes identified by IP:port pairs. The client is not required to, but should try to memorize that hash slot 3999 is served by 127.0.0.1:6381. This way once a new command needs to be issued it can compute the hash slot of the target key and pick the right node with higher chances. -Note that when the Cluster is stable, eventually all the clients will obtain -a map of hash slots -> nodes, making the cluster efficient, with clients -directly addressing the right nodes without redirections nor proxies or -other single point of failure entities. +An alternative is to just refresh the whole client-side cluster layout +when a MOVED redirection is received, using the `CLUSTER NODES` or +`CLUSTER SLOTS` commands, since when a redirection is encountered, likely +multiple slots were reconfigured, not just one, so to update the configuration +ASAP for the client is often the best strategy. 
+
+Note that when the Cluster is stable (no ongoing changes in the configuration),
+eventually all the clients will obtain a map of hash slots -> nodes, making
+the cluster efficient, with clients directly addressing the right nodes
+without redirections nor proxies or other single point of failure entities.

-A client should be also able to handle -ASK redirections that are described
-later in this document.
+A client **must be also able to handle -ASK redirections** that are described
+later in this document, otherwise it is not a complete Redis Cluster client.

Cluster live reconfiguration
---

Redis cluster supports the ability to add and remove nodes while the cluster
is running. Actually adding or removing a node is abstracted into the same
-operation, that is, moving a hash slot from a node to another.
+operation, that is, moving a hash slot from a node to another. This means
+that the same basic mechanism can be used in order to rebalance the cluster, add
+or remove nodes, and so forth.

* To add a new node to the cluster an empty node is added to the cluster
and some hash slot is moved from existing nodes to the new node.
* To remove a node from the cluster the hash slots assigned to that node
are moved to other existing nodes.
+* To rebalance the cluster a given set of hash slots are moved between nodes.

-So the core of the implementation is the ability to move slots around.
+So the core of the implementation is the ability to move hash slots around.
Actually from a practical point of view a hash slot is just a set of keys, so
what Redis cluster really does during *resharding* is to move keys from
-an instance to another instance.
+an instance to another instance. Moving a hash slot means moving all the keys
+that happen to hash into this hash slot.

To understand how this works we need to show the `CLUSTER` subcommands
that are used to manipulate the slots translation table in a Redis cluster node.
-The following subcommands are available:
+The following subcommands are available (among others not useful in this case):

-* CLUSTER ADDSLOTS slot1 [slot2] ... [slotN]
-* CLUSTER DELSLOTS slot1 [slot2] ... [slotN]
-* CLUSTER SETSLOT slot NODE node
-* CLUSTER SETSLOT slot MIGRATING node
-* CLUSTER SETSLOT slot IMPORTING node
+* `CLUSTER ADDSLOTS` slot1 [slot2] ... [slotN]
+* `CLUSTER DELSLOTS` slot1 [slot2] ... [slotN]
+* `CLUSTER SETSLOT` slot NODE node
+* `CLUSTER SETSLOT` slot MIGRATING node
+* `CLUSTER SETSLOT` slot IMPORTING node

The first two commands, `ADDSLOTS` and `DELSLOTS`, are simply used to assign
-(or remove) slots to a Redis node. After the hash slots are assigned they
-will propagate across all the cluster using the gossip protocol.
-The `ADDSLOTS` command is usually used when a new cluster is configured
-from scratch to assign slots to all the nodes in a fast way.
+(or remove) slots to a Redis node. Assigning a slot means to tell a given
+master node, that it will be in charge of storing and serving content for
+the specified hash slot.
+
+After the hash slots are assigned they will propagate across all the cluster
+using the gossip protocol, as specified later in the
+*configuration propagation* section.
+
+The `ADDSLOTS` command is usually used when a new cluster is created
+from scratch to assign each master node a subset of all the 16384 hash
+slots available.
+
+The `DELSLOTS` is mainly used for manual modification of a cluster configuration
+or for debugging tasks: in practice it is rarely used.

The `SETSLOT` subcommand is used to assign a slot to a specific node ID if
-the `NODE` form is used. Otherwise the slot can be set in the two special
-states `MIGRATING` and `IMPORTING`:
+the `SETSLOT NODE` form is used. Otherwise the slot can be set in the
+two special states `MIGRATING` and `IMPORTING`. Those two special states
+are used in order to migrate a hash slot from one node to another.
* When a slot is set as MIGRATING, the node will accept all the requests for queries that are about this hash slot, but only if the key in question @@ -398,12 +450,12 @@ exists, otherwise the query is forwarded using a `-ASK` redirection to the node that is target of the migration. * When a slot is set as IMPORTING, the node will accept all the requests for queries that are about this hash slot, but only if the request is -preceded by an ASKING command. Otherwise if not ASKING command was given +preceded by an `ASKING` command. Otherwise if not `ASKING` command was given by the client, the query is redirected to the real hash slot owner via -a `-MOVED` redirection error. +a `-MOVED` redirection error, like would happen normally. -At first this may appear strange, but now we'll make it more clear. -Assume that we have two Redis nodes, called A and B. +Let's make this more clear with an example of hash slot migration. +Assume that we have two Redis master nodes, called A and B. We want to move hash slot 8 from A to B, so we issue commands like this: * We send B: CLUSTER SETSLOT 8 IMPORTING A @@ -414,20 +466,21 @@ they are queried with a key that belongs to hash slot 8, so what happens is that: * All the queries about already existing keys are processed by "A". -* All the queries about non existing keys in A are processed by "B". +* All the queries about non existing keys in A are processed by "B", because "A" will redirect clients to "B". This way we no longer create new keys in "A". -In the meantime, a special client that is called `redis-trib` and is -the Redis cluster configuration utility will make sure to migrate existing -keys from A to B. This is performed using the following command: +In the meantime, a special program used during reshardings, that is +called `redis-trib`, and is the default Redis cluster configuration utility, +will make sure to migrate existing keys in hash slot 8 from A to B. 
+This is performed using the following command:

CLUSTER GETKEYSINSLOT slot count

The above command will return `count` keys in the specified hash slot.
-For every key returned, redis-trib sends node A a `MIGRATE` command, that
+For every key returned, `redis-trib` sends node "A" a `MIGRATE` command, that
will migrate the specified key from A to B in an atomic way (both instances
-are locked for the time needed to migrate a key so there are no race
-conditions). This is how `MIGRATE` works:
+are locked for the time (usually very small time) needed to migrate a key so
+there are no race conditions). This is how `MIGRATE` works:

MIGRATE target_host target_port key target_database id timeout

@@ -440,10 +493,15 @@ In Redis cluster there is no need to specify a database other than 0, but
`MIGRATE` can be used for other tasks as well not involving Redis cluster so
it is a general enough command.
`MIGRATE` is optimized to be as fast as possible even when moving complex
-keys such as long lists, but of course in Redis cluster reconfiguring the
+keys such as long lists, but of course in Redis Cluster reconfiguring the
cluster where big keys are present is not considered a wise procedure if
there are latency constraints in the application using the database.

+When finally the migration process is finished, the `SETSLOT NODE ` command is sent to the two nodes involved in the migration in order to
+set the slots in normal state again. Moreover the same command is usually
+sent to all the other instances in order to not wait for the natural
+propagation of the new configuration across the cluster.
+
ASK redirection
---

@@ -459,8 +517,8 @@ then B if needed.

Since this happens only for one hash slot out of 16384 available the
performance hit on the cluster is acceptable.
However we need to force that client behavior, so in order to make sure -that clients will only try slot B after A was tried, node B will only -accept queries of a slot that is set as IMPORTING if the client send the +that clients will only try node B after A was tried, node B will only +accept queries of a slot that is set as IMPORTING if the client sends the ASKING command before sending the query. Basically the ASKING command set a one-time flag on the client that forces @@ -469,27 +527,32 @@ a node to serve a query about an IMPORTING slot. So the full semantics of the ASK redirection is the following, from the point of view of the client. -* If ASK redirection is received send only the query in object to the specified node. -* Start the query with the ASKING command. +* If ASK redirection is received send only the query that was redirected to the specified node, but continue sending the next queries to the old node. +* Start the redirected query with the ASKING command. * Don't update local client tables to map hash slot 8 to B for now. Once the hash slot 8 migration is completed, A will send a MOVED message and -the client may permanently map hash slot 8 to the new ip:port pair. +the client may permanently map hash slot 8 to the new IP and port pair. Note that however if a buggy client will perform the map earlier this is not -a problem since it will not send the ASKING command before the query and B -will redirect the client to A using a MOVED redirection error. +a problem since it will not send the ASKING command before issuing the query, +so B will redirect the client to A using a MOVED redirection error. + +Slots migration is explained in similar terms but with a different wording, +for the sake of redundancy in the documentation, in the `CLUSTER SETSLOT` +command documentation. Clients first connection and handling of redirections. 
---

While it is possible to have a Redis Cluster client implementation that does not
-takes the slots configuration (the map between slot numbers and addresses of
+remember the slots configuration (the map between slot numbers and addresses of
nodes serving it) in memory, and only works contacting random nodes waiting to be
redirected, such a client would be very inefficient.

Redis Cluster clients should try to be smart enough to memorize the slots
-configuration. However this configuration does not *require* to be updated,
-since contacting the wrong node will simply result in a redirection.
+configuration. However this configuration does not *require* to be up to date,
+since contacting the wrong node will simply result in a redirection, that will
+trigger an update of the client view.

Clients usually need to fetch a complete list of slots and mapped node
addresses in two different moments:

@@ -502,10 +565,10 @@ slot in its table, however this is usually not efficient since often the
configuration of multiple slots is modified at once (for example if a slave
is promoted to master, all the slots served by the old master will be remapped).

It is much simpler to react to a `MOVED` redirection fetching the full map
-of slots - nodes from scratch.
+of slots to nodes from scratch.

-In order to retrieve the slots configuration Redis Cluster offers (starting
-with 3.0.0 beta-7) an alternative to the `CLUSTER NODES` command that does not
+In order to retrieve the slots configuration Redis Cluster offers
+an alternative to the `CLUSTER NODES` command that does not
require parsing, and only provides the information strictly needed to clients.
The new command is called `CLUSTER SLOTS` and provides an array of slots @@ -536,7 +599,7 @@ The following is an example of output of `CLUSTER SLOTS`: ``` The first two sub-elements of every element of the returned array are the -start-end slots of the range, the additional elements represent address-port +start-end slots of the range. The additional elements represent address-port pairs. The first address-port pair is the master serving the slot, and the additional address-port pairs are all the slaves serving the same slot that are not in an error condition (the FAIL flag is not set). @@ -546,10 +609,10 @@ For example the first element of the output says that slots from 5461 to 10922 to scale read-only load contacting the slave at 127.0.0.1:7004. `CLUSTER SLOTS` does not guarantee to return ranges that will cover all the -16k slots if the cluster is misconfigured, so clients should initialize the +16384 slots if the cluster is misconfigured, so clients should initialize the slots configuration map filling the target nodes with NULL objects, and report an error if the user will try to execute commands about keys -that belong to misconfigured (unassigned) slots. +that belong to unassigned slots. However before to return an error to the caller, when a slot is found to be be unassigned, the client should try to fetch the slots configuration @@ -563,9 +626,8 @@ For example the following operation is valid: MSET {user:1000}.name Angela {user:1000}.surname White -However multi-key operations become unavailable when a resharding of the -hash slot the keys are hashing to is being moved form a node to another -(because of a manual resharding). +However multi-key operations may become unavailable when a resharding of the +hash slot the keys belong in progress. 
More specifically, even during a resharding, the multi-key operations targeting keys that all exist and are still all in the same node (either @@ -575,6 +637,9 @@ Operations about keys that don't exist or are, during the resharding, split between the source and destination node, will generate a `-TRYAGAIN` error. The client can try the operation after some time, or report back the error. +As soon the migration of the specified hash slot has terminated, all the +multi key operations are available again for this hash slot. + Scaling reads using slave nodes --- @@ -585,7 +650,7 @@ in order to scale reads using the `READONLY` command. `READONLY` tells a Redis cluster slave node that the client is ok reading possibly stale data and is not interested in running write queries. -When the connection is in *readonly* mode, the cluster will send a redirection +When the connection is in readonly mode, the cluster will send a redirection to the client only in the context of an operation involving keys not served by the slave's master node. This may happen because: @@ -595,7 +660,7 @@ by the slave's master node. This may happen because: When this happens the client should update its hashslot map as explained in the previous sections. -The *readonly* state of the connection can be undone using the `READWRITE` command. +The readonly state of the connection can be cleared using the `READWRITE` command. Fault Tolerance === @@ -603,34 +668,39 @@ Fault Tolerance Nodes heartbeat and gossip messages --- -Nodes in the cluster exchange ping / pong packets. +Nodes in the cluster exchange ping / pong packets, generally just called +heartbeat packets. -Usually a node will ping a few random nodes every second so that the total number of ping packets send (and pong packets received) is a constant amount regardless of the number of nodes in the cluster. 
+Usually a node will ping a few random nodes every second so that the total number of ping packets send (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. However every node makes sure to ping every other node that we don't either sent a ping or received a pong for longer than half the `NODE_TIMEOUT` time. Before `NODE_TIMEOUT` has elapsed, nodes also try to reconnect the TCP link with another node to make sure nodes are not believed to be unreachable only because there is a problem in the current TCP connection. -The amount of messages exchanged can be bigger than O(N) if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which we don't have fresh information for half the `NODE_TIMEOUT` time. +The amount of messages globally exchanged can be sensible if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which we don't have fresh information for half the `NODE_TIMEOUT` time. For example in a 100 nodes cluster with a node timeout set to 60 seconds, every node will try to send 99 pings every 30 seconds, with a total amount of pings of 3.3 per second, that multiplied for 100 nodes is 330 pings per second in the total cluster. -There are ways to use the gossip information already exchanged by Redis Cluster to reduce the amount of messages exchanged in a significant way. For example we may ping within half `NODE_TIMEOUT` only nodes that are already reported to be in "possible failure" state (see later) by other nodes, and ping the other nodes that are reported as working only in a best-effort way within the limit of the few packets per second. 
However in real-world tests large clusters with very small `NODE_TIMEOUT` settings used to work reliably so this change will be considered in the future as actual deployments of large clusters will be tested. +There are ways to lower the number of messages, however no issue currently was +even reported with the bandwidth used by Redis Cluster failure detection, so +for now the obvious and direct design is used. Note that even in the above +example, the 330 packets per seconds exchanged are evenly divided among 100 +different nodes, so the traffic each node receives is acceptable. -Ping and Pong packets content +Heartbeat packets content --- -Ping and Pong packets contain a header that is common to all the kind of packets (for instance packets to request a vote), and a special Gossip Section that is specific of Ping and Pong packets. +Ping and pong packets contain a header that is common to all the kind of packets (for instance packets to request a failover vote), and a special Gossip Section that is specific of Ping and Pong packets. The common header has the following information: * Node ID, that is a 160 bit pseudorandom string that is assigned the first time a node is created and remains the same for all the life of a Redis Cluster node. -* The `currentEpoch` and `configEpoch` field, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in details in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of the master. +* The `currentEpoch` and `configEpoch` field of the sending node, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in details in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master. * The node flags, indicating if the node is a slave, a master, and other single-bit node information. 
-* A bitmap of the hash slots served by a given node, or if the node is a slave, a bitmap of the slots served by its master. -* Port: the sender TCP base port (that is, the port used by Redis to accept client commands, add 10000 to this to obtain the cluster port). -* State: the state of the cluster from the point of view of the sender (down or ok). -* The master node ID, if this is a slave. +* A bitmap of the hash slots served by the sending node, or if the node is a slave, a bitmap of the slots served by its master. +* The sender TCP base port (that is, the port used by Redis to accept client commands, add 10000 to this to obtain the cluster port). +* The state of the cluster from the point of view of the sender (down or ok). +* The master node ID of the sending node, if it is a slave. -Ping and pong packets contain a gossip section. This section offers to the receiver a view about what the sender node thinks about other nodes in the cluster. The gossip section only contains information about a few random nodes among the known nodes set of the sender. +Ping and pong packets also contain a gossip section. This section offers to the receiver a view of what the sender node thinks about other nodes in the cluster. The gossip section only contains information about a few random nodes among the set of known nodes of the sender. The amount of nodes mentioned in a gossip section is proportional to the cluster size. For every node added in the gossip section the following fields are reported: @@ -645,7 +715,7 @@ Failure detection Redis Cluster failure detection is used to recognize when a master or slave node is no longer reachable by the majority of nodes, and as a result of this event, either promote a slave to the role of master, of when this is not possible, put the cluster in an error state to stop receiving queries from clients. -Every node takes a list of flags associated with other known nodes. 
There are two flags that are used for failure detection that are called `PFAIL` and `FAIL`. `PFAIL` means _Possible failure_, and is a non acknowledged failure type. `FAIL` means that a node is failing and that this condition was confirmed by a majority of masters in a fixed amount of time. +As already mentioned, every node takes a list of flags associated with other known nodes. There are two flags that are used for failure detection that are called `PFAIL` and `FAIL`. `PFAIL` means *Possible failure*, and is a non acknowledged failure type. `FAIL` means that a node is failing and that this condition was confirmed by a majority of masters within a fixed amount of time. **PFAIL flag:** @@ -655,7 +725,7 @@ The concept of non reachability for a Redis Cluster node is that we have an **ac **FAIL flag:** -The `PFAIL` flag alone is just some local information every node has about other nodes, but it is not used in order to act and is not sufficient to trigger a slave promotion. For a node to be really considered down the `PFAIL` condition needs to be promoted to a `FAIL` condition. +The `PFAIL` flag alone is just a local information every node has about other nodes, but it is not used in order to act and is not sufficient to trigger a slave promotion. For a node to be really considered down the `PFAIL` condition needs to be escalated to a `FAIL` condition. As outlined in the node heartbeats section of this document, every node sends gossip messages to every other node including the state of a few random known nodes. So every node eventually receives the set of node flags for every other node. This way every node has a mechanism to signal other nodes about failure conditions they detected. @@ -663,36 +733,38 @@ This mechanism is used in order to escalate a `PFAIL` condition to a `FAIL` cond * Some node, that we'll call A, has another node B flagged as `PFAIL`. 
* Node A collected, via gossip sections, information about the state of B from the point of view of the majority of masters in the cluster. -* The majority of masters signaled the `PFAIL` or `PFAIL` condition within `NODE_TIMEOUT * FAIL_REPORT_VALIDITY_MULT` time. +* The majority of masters signaled the `PFAIL` or `PFAIL` condition within `NODE_TIMEOUT * FAIL_REPORT_VALIDITY_MULT` time. (The validity factor is set to 2 in the current implementation, so this is just two times the `NODE_TIMEOUT` time). If all the above conditions are true, Node A will: * Mark the node as `FAIL`. * Send a `FAIL` message to all the reachable nodes. -The `FAIL` message will force every receiving node to mark the node in `FAIL` state. +The `FAIL` message will force every receiving node to mark the node in `FAIL` state, whatever or not it already flagged the node in `PFAIL` state. Note that *the FAIL flag is mostly one way*, that is, a node can go from `PFAIL` to `FAIL`, but for the `FAIL` flag to be cleared there are only two possibilities: * The node is already reachable, and it is a slave. In this case the `FAIL` flag can be cleared as slaves are not failed over. * The node is already reachable, and it is a master not serving any slot. In this case the `FAIL` flag can be cleared as masters without slots do not really participate to the cluster, and are waiting to be configured in order to join the cluster. -* The node is already reachable, is a master, but a long time (N times the `NODE_TIMEOUT`) has elapsed without any detectable slave promotion. - -**While the `PFAIL` -> `FAIL` transition uses a form of agreement, the agreement used is weak:** +* The node is already reachable, is a master, but a long time (N times the `NODE_TIMEOUT`) has elapsed without any detectable slave promotion. Better for it to rejoin the cluster and continue in this case. 
-1) Nodes collect views of other nodes during some time, so even if the majority of master nodes need to "agree", actually this is just state that we collected from different nodes at different times and we are not sure this state is stable. +In is useful to note that While the `PFAIL` -> `FAIL` transition uses a form of agreement, the agreement used is weak: -2) While every node detecting the `FAIL` condition will force that condition on other nodes in the cluster using the `FAIL` message, there is no way to ensure the message will reach all the nodes. For instance a node may detect the `FAIL` condition and because of a partition will not be able to reach any other node. +1. Nodes collect views of other nodes during some time, so even if the majority of master nodes need to "agree", actually this is just state that we collected from different nodes at different times and we are not sure, nor we require, that at a given moment the majority of masters agreed. However we discard failure reports which are old, so the failure was signaled by the majority of masters within a window of time. +2. While every node detecting the `FAIL` condition will force that condition on other nodes in the cluster using the `FAIL` message, there is no way to ensure the message will reach all the nodes. For instance a node may detect the `FAIL` condition and because of a partition will not be able to reach any other node. -However the Redis Cluster failure detection has liveness requirement: eventually all the nodes should agree about the state of a given node even in case of partitions, once the partitions heal. There are two cases that can originate from split brain conditions, either some minority of nodes believe the node is in `FAIL` state, or a minority of nodes believe the node is not in `FAIL` state. 
In both the cases eventually the cluster will have a single view of the state of a given node: +However the Redis Cluster failure detection has a liveness requirement: eventually all the nodes should agree about the state of a given node. There are two cases that can originate from split brain conditions, either some minority of nodes believe the node is in `FAIL` state, or a minority of nodes believe the node is not in `FAIL` state. In both the cases eventually the cluster will have a single view of the state of a given node: -**Case 1**: If an actual majority of masters flagged a node as `FAIL`, for the chain effect every other node will flag the master as `FAIL` eventually. +**Case 1**: If an actual majority of masters flagged a node as `FAIL`, because of the failure detector and the *chain effect* it generates, every other node will flag the master as `FAIL` eventually, since in the specified window of time enough failures will be reported. **Case 2**: When only a minority of masters flagged a node as `FAIL`, the slave promotion will not happen (as it uses a more formal algorithm that makes sure everybody will know about the promotion eventually) and every node will clear the `FAIL` state for the `FAIL` state clearing rules above (no promotion after some time > of N times the `NODE_TIMEOUT`). -**Basically the `FAIL` flag is only used as a trigger to run the safe part of the algorithm** for the slave promotion. In theory a slave may act independently and start a slave promotion when its master is not reachable, and wait for the masters to refuse the provide acknowledgment if the master is actually reachable by the majority. However the added complexity of the `PFAIL -> FAIL` state, the weak agreement, and the `FAIL` message to force the propagation of the state in the shortest amount of time in the reachable part of the cluster, have practical advantages. 
Because of this mechanisms usually all the nodes will stop accepting writes about at the same time if the cluster is in an error condition, that is a desirable feature from the point of view of applications using Redis Cluster. Also not needed election attempts, initiated by slaves that can't reach its master for local problems (that is otherwise reachable by the majority of the other master nodes), are avoided. +**Basically the `FAIL` flag is only used as a trigger to run the safe part of the algorithm** for the slave promotion. In theory a slave may act independently and start a slave promotion when its master is not reachable, and wait for the masters to refuse to provide the acknowledgment, if the master is actually reachable by the majority. However the added complexity of the `PFAIL -> FAIL` state, the weak agreement, and the `FAIL` message to force the propagation of the state in the shortest amount of time in the reachable part of the cluster, have practical advantages. Because of this mechanisms usually all the nodes will stop accepting writes about at the same time if the cluster is in an error condition, that is a desirable feature from the point of view of applications using Redis Cluster. Also not needed election attempts, initiated by slaves that can't reach its master for local problems (that is otherwise reachable by the majority of the other master nodes), are avoided. -Cluster epoch +Configuration handling, propagation, and failovers +=== + +Cluster current epoch --- Redis Cluster uses a concept similar to the Raft algorithm "term". In Redis Cluster the term is called epoch instead, and it is used in order to give an incremental version to events, so that when multiple nodes provide conflicting information, it is possible for another node to understand which state is the most up to date. @@ -701,15 +773,15 @@ The `currentEpoch` is a 64 bit unsigned number. 
At node creation every Redis Cluster node, both slaves and master nodes, set the `currentEpoch` to 0. -Every time a ping or pong is received from another node, if the epoch of the sender (part of the cluster bus messages header) is greater than the local node epoch, then `currentEpoch` is updated to the sender epoch. +Every time a packet is received from another node, if the epoch of the sender (part of the cluster bus messages header) is greater than the local node epoch, then `currentEpoch` is updated to the sender epoch. -Because of this semantics eventually all the nodes will agree to the greater epoch in the cluster. +Because of this semantics eventually all the nodes will agree to the greatest `configEpoch` in the cluster. -The way this information is used is when the state is changed and a node seeks agreement in order to perform some action. +The way this information is used is when the state of the cluster is changed and a node seeks agreement in order to perform some action. Currently this happens only during slave promotion, as described in the next section. Basically the epoch is a logical clock for the cluster and dictates whatever a given information wins over one with a smaller epoch. -Config epoch +Configuration epoch --- Every master always advertises its `configEpoch` in ping and pong packets along with a bitmap advertising the set of slots it serves. @@ -725,9 +797,10 @@ As explained in the next sections the `configEpoch` helps to resolve conflicts d Slave nodes also advertise the `configEpoch` field in ping and pong packets, but in case of slaves the field represents the `configEpoch` of its master the last time they exchanged packets. This allows other instances to detect when a slave has an old configuration that needs to be updated (Master nodes will not grant votes to slaves with an old configuration). -Every time the `configEpoch` changes for some known node, it is permanently stored in the nodes.conf file. 
+Every time the `configEpoch` changes for some known node, it is permanently stored in the nodes.conf file by all the nodes that received this information. The same also happens for the `currentEpoch` value. These two variables are guaranteed to be saved and `fsync-ed` to disk when updated before a node continues its operations.

-Currently when a node is restarted its `currentEpoch` is set to the greatest `configEpoch` of the known nodes. This is not safe in a crash-recovery system model, and the system will be modified in order to store the currentEpoch in the persistent configuration as well.
+New, incremental, and guaranteed to be unique `configEpoch` values are generated
+using a simple algorithm during failovers.

Slave election and promotion
---

@@ -743,17 +816,20 @@ A slave starts an election when the following conditions are met:

* The master was serving a non-zero number of slots.
* The slave replication link was disconnected from the master for no longer than a given amount of time, in order to ensure to promote a slave with a reasonable data freshness. This time is user configurable.

-In order to be elected the first step for a slave is to increment its `currentEpoch` counter, and request votes from master instances.
+In order to be elected, the first step for a slave is to increment its `currentEpoch` counter, and request votes from master instances.

-Votes are requested by the slave by broadcasting a `FAILOVER_AUTH_REQUEST` packet to every master node of the cluster. Then it waits for replies to arrive for a maximum time of two times the `NODE_TIMEOUT`, but always for at least for 2 seconds.
+Votes are requested by the slave by broadcasting a `FAILOVER_AUTH_REQUEST` packet to every master node of the cluster. Then it waits for replies to arrive for a maximum time of two times the `NODE_TIMEOUT`, but always for at least 2 seconds.
-Once a master voted for a given slave, replying positively with a `FAILOVER_AUTH_ACK`, it can no longer vote for another slave of the same master for a period of `NODE_TIMEOUT * 2`. In this period it will not be able to reply to other authorization requests for the same master. This is not needed to guarantee safety, but useful to avoid multiple slaves to get elected (even if with a different `configEpoch`) about at the same time. +Once a master voted for a given slave, replying positively with a `FAILOVER_AUTH_ACK`, it can no longer vote for another slave of the same master for a period of `NODE_TIMEOUT * 2`. In this period it will not be able to reply to other authorization requests for the same master. This is not needed to guarantee safety, but useful to avoid multiple slaves to get elected (even if with a different `configEpoch`) about at the same time, which is usually not wanted. A slave discards all the `AUTH_ACK` replies that are received having an epoch that is less than the `currentEpoch` at the time the vote request was sent, in order to never count as valid votes that are about a previous election. Once the slave receives ACKs from the majority of masters, it wins the election. Otherwise if the majority is not reached within the period of two times `NODE_TIMEOUT` (but always at least 2 seconds), the election is aborted and a new one will be tried again after `NODE_TIMEOUT * 4` (and always at least 4 seconds). +Slave rank +--- + A slave does not try to get elected as soon as the master is in `FAIL` state, but there is a little delay, that is computed as: DELAY = 500 milliseconds + random delay between 0 and 500 milliseconds + @@ -765,14 +841,17 @@ The random delay is used to desynchronize slaves so they'll likely start an elec The `SLAVE_RANK` is the rank of this slave regarding the amount of replication stream it processed from the master. 
Slaves exchange messages when the master -is failing in order to establish a rank: the slave with the most updated -replication offset is at rank 0, the second must updated at rank 1, and so forth. In this way the most updated slaves try to get elected before others. +is failing in order to establish a (best effort) rank: the slave with the most +updated replication offset is at rank 0, the second most updated at rank 1, and so forth. In this way the most updated slaves try to get elected before others. + +However if a slave of higher rank fails to be elected, the others will try +shortly, so the order is not enforced in a strict way. -Once a slave wins the election, it starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` set to the `currentEpoch` at which the election was started. +Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured). +In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` packet if found not updated by any other node). -The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. 
Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. +The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. How nodes rejoining the cluster are configured is explained in one of the next sections. Masters reply to slave vote request --- @@ -783,88 +862,152 @@ Masters receive requests for votes in form of `FAILOVER_AUTH_REQUEST` requests f For a vote to be granted the following conditions need to be met: -* 1) A master only votes a single time for a given epoch, and refuses to vote for older epochs: every master has a lastVoteEpoch field and will refuse to vote again as long as the `currentEpoch` in the auth request packet is not greater than the lastVoteEpoch. When a master replies positively to a vote request, the lastVoteEpoch is updated accordingly. -* 2) A master votes for a slave only if the slave's master is flagged as `FAIL`. -* 3) Auth requests with a `currentEpoch` that is less than the master `currentEpoch` are ignored. Because of this the Master reply will always have the same `currentEpoch` as the auth request. If the same slave asks again to be voted, incrementing the `currentEpoch`, it is guaranteed that an old delayed reply from the master can not be accepted for the new vote. +1. A master only votes a single time for a given epoch, and refuses to vote for older epochs: every master has a lastVoteEpoch field and will refuse to vote again as long as the `currentEpoch` in the auth request packet is not greater than the lastVoteEpoch. 
When a master replies positively to a vote request, the lastVoteEpoch is updated accordingly, and safely stored on disk. +2. A master votes for a slave only if the slave's master is flagged as `FAIL`. +3. Auth requests with a `currentEpoch` that is less than the master `currentEpoch` are ignored. Because of this the Master reply will always have the same `currentEpoch` as the auth request. If the same slave asks again to be voted, incrementing the `currentEpoch`, it is guaranteed that an old delayed reply from the master can not be accepted for the new vote. -Example of the issue caused by not using this rule: +Example of the issue caused by not using rule number 3: Master `currentEpoch` is 5, lastVoteEpoch is 1 (this may happen after a few failed elections) * Slave `currentEpoch` is 3. * Slave tries to be elected with epoch 4 (3+1), master replies with an ok with `currentEpoch` 5, however the reply is delayed. -* Slave tries to be elected again, with epoch 5 (4+1), the delayed reply reaches to slave with `currentEpoch` 5, and is accepted as valid. +* Slave will try to be elected again, at a later time, with epoch 5 (4+1), the delayed reply reaches to slave with `currentEpoch` 5, and is accepted as valid. -* 4) Masters don't vote a slave of the same master before `NODE_TIMEOUT * 2` has elapsed since a slave of that master was already voted. This is not strictly required as it is not possible that two slaves win the election in the same epoch, but in practical terms it ensures that normally when a slave is elected it has plenty of time to inform the other slaves avoiding that another slave will win a new election. -* 5) Masters don't try to select the best slave in any way, simply if the slave's master is in `FAIL` state and the master did not voted in the current term, the positive vote is granted. However the best slave is the most likely to start the election and win it before the other slaves. 
-* 6) When a master refuses to vote for a given slave there is no negative response, the request is simply ignored. -* 7) Masters don't grant the vote to slaves sending a `configEpoch` that is less than any `configEpoch` in the master table for the slots claimed by the slave. Remember that the slave sends the `configEpoch` of its master, and the bitmap of the slots served by its master. What this means is basically that the slave requesting the vote must have a configuration, for the slots it wants to failover, that is newer or equal the one of the master granting the vote. +4. Masters don't vote a slave of the same master before `NODE_TIMEOUT * 2` has elapsed since a slave of that master was already voted. This is not strictly required as it is not possible that two slaves win the election in the same epoch, but in practical terms it ensures that normally when a slave is elected it has plenty of time to inform the other slaves avoiding that another slave will win a new election, doing a new unwanted failover. +5. Masters don't try to select the best slave in any way, simply if the slave's master is in `FAIL` state and the master did not vote in the current term, the positive vote is granted. However the best slave is the most likely to start the election and win it before the other slaves, since it usually will be able to start the voting process earlier, because of its *higher rank* as explained in the previous section. +6. When a master refuses to vote for a given slave there is no negative response, the request is simply ignored. +7. Masters don't grant the vote to slaves sending a `configEpoch` that is less than any `configEpoch` in the master table for the slots claimed by the slave. Remember that the slave sends the `configEpoch` of its master, and the bitmap of the slots served by its master. 
What this means is basically that the slave requesting the vote must have a configuration, for the slots it wants to failover, that is newer or equal the one of the master granting the vote. -Race conditions during slaves election +Practical example of configuration epoch usefulness during partitions --- This section illustrates how the concept of epoch is used to make the slave promotion process more resistant to partitions. * A master is no longer reachable indefinitely. The master has three slaves A, B, C. -* Slave A wins the election and is promoted as master. -* A partition makes A not available for the majority of the cluster. +* Slave A wins the election and is promoted to master. +* A network partition makes A not available for the majority of the cluster. * Slave B wins the election and is promoted as master. * A partition makes B not available for the majority of the cluster. * The previous partition is fixed, and A is available again. -At this point B is down, and A is available again and will compete with C that will try to get elected in order to fail over B. +At this point B is down, and A is available again, having a role of master (actually `UPDATE` messages would reconfigure it promptly, but here we assume all get lost). At the same time, slave C will try to get elected in order to fail over B. This is what happens: -Both will eventually claim to be promoted slaves for the same set of hash slots, however the `configEpoch` they publish will be different, and the C epoch will be greater, so all the other nodes will upgrade their configuration to C. +1. C will try to get elected and will succeed, since for the majority of masters its master is actually down. It will obtain a new incremental `configEpoch`. +2. A will not be able to claim to be the master for its hash slots, because the other nodes already have the same hash slots associated with a higher configuration epoch (the one of C) compared to the one published by A. +3. 
So, all the nodes will upgrade their table to assign the hash slots to C, and the cluster will continue its operations. -A itself will detect pings from C serving the same slots with a greater epoch and will reconfigure as a slave of C. +As you'll see in the next sections, actually a stale node rejoining a cluster +will usually get notified ASAP about the configuration change, since as soon +as it pings any other node, the receiver will detect it has stale information +and will send an `UPDATE` message. -Rules for server slots information propagation +Hash slots configuration propagation --- An important part of Redis Cluster is the mechanism used to propagate the information about which cluster node is serving a given set of hash slots. This is vital to both the startup of a fresh cluster and the ability to upgrade the configuration after a slave was promoted to serve the slots of its failing master. -Ping and Pong packets that instances continuously exchange contain a header that is used by the sender in order to advertise the hash slots it claims to be responsible for. This is the main mechanism used in order to propagate change, with the exception of a manual reconfiguration operated by the cluster administrator (for example a manual resharding via redis-trib in order to move hash slots among masters). +The same mechanism allows nodes partitioned away for an indefinite amount of +time to rejoin the cluster in a sensible way. + +Hash slot configurations are propagated in basically two ways: + +1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a slave) serves. +2. `UPDATE` messages. Since in every heartbeat packet there is information about the sender `configEpoch` and set of hash slots served, if a receiver of a heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. 
+ +The receiver of a heartbeat or `UPDATE` message uses certain simple rules in +order to update its table mapping hash slots to nodes. When a new Redis Cluster node is created, its local hash slot table is simply initialized to `NULL` entries, so that each hash slot is not bound, not linked to any node. Something like the following: -When a new Redis Cluster node is created, its local slot table, that maps a given hash slot with a given node ID, is initialized so that every hash slot is assigned to nil, that is, the hash slot is unassigned. +``` +0 -> NULL +1 -> NULL +2 -> NULL +... +16383 -> NULL +``` The first rule followed by a node in order to update its hash slot table is the following: -**Rule 1: If a hash slot is unassigned, and a known node claims it, I'll modify my hash slot table to associate the hash slot to this node.** +**Rule 1**: If a hash slot is unassigned (set to `NULL`), and a known node claims it, I'll modify my hash slot table associating the claimed hash slots to it. + +So if we receive a heartbeat from node A, claiming to serve hash slots 1 and 2 with a configuration epoch value of 3, the table will be modified into: + +``` +0 -> NULL +1 -> A [3] +2 -> A [3] +... +16383 -> NULL +``` + +Because of this rule, when a new cluster is created, it is only needed to manually assign (using the `CLUSTER ADDSLOTS` command, via the redis-trib command line tool, or by any other means) the slots served by each master node to the node itself, and the information will rapidly propagate across the cluster. + +However this rule is not enough. We know that hash slot mapping can change +because of two events: + +1. A slave replaces its master during a failover. +2. A slot is resharded from a node to a different one. 
-Because of this rule, when a new cluster is created, it is only needed to manually assign (using the `CLUSTER` command, usually via the redis-trib command line tool) the slots served by each master node to the node itself, and the information will rapidly propagate across the cluster. +For now let's focus on failovers. When a slave fails over its master, it obtains +a configuration epoch which is guaranteed to be greater than the one of its +master (and more generally greater than any other configuration epoch +generated before). For example node B, which is a slave of A, may fail over +A with configuration epoch of 4. It will start to send heartbeat packets +(the first time mass-broadcasting cluster-wide), because of the following +second rule, receivers will update their tables: -However this rule is not enough when a configuration update happens because of a slave gets promoted to master after a master failure. The new master instance will advertise the slots previously served by the old slave, but those slots are not unassigned from the point of view of the other nodes, that will not upgrade the configuration if they just follow the first rule. +**Rule 2**: If a hash slot is already assigned, and a known node is advertising it using a `configEpoch` that is greater than the `configEpoch` of the master currently associated with the slot, I'll rebind the hash slot to the new node. -For this reason there is a second rule that is used in order to rebind a hash slot already assigned to a previous node to a new node claiming it. The rule is the following: +So after receiving messages from B that claims to serve hash slots 1 and 2 with configuration epoch of 4, the receivers will update their table in the following way: + +``` +0 -> NULL +1 -> B [4] +2 -> B [4] +... 
+16383 -> NULL +``` -**Rule 2: If a hash slot is already assigned, and a known node is advertising it using a `configEpoch` that is greater than the `configEpoch` advertised by the current owner of the slot, I'll rebind the hash slot to the new node.** +Liveness property: because of the second rule eventually all the nodes in the cluster will agree that the owner of a slot is the one with the greatest `configEpoch` among the nodes advertising it. -Because of the second rule eventually all the nodes in the cluster will agree that the owner of a slot is the one with the greatest `configEpoch` among the nodes advertising it. +This mechanism in Redis Cluster is called **last failover wins**. -UPDATE messages +The same happens during reshardings. When a node importing a hash slot +ends the import operation, its configuration epoch is incremented to make +sure the information will be updated in the cluster. + +UPDATE messages, a closer look --- -The described system for the propagation of hash slots configurations -only uses the normal ping and pong messages exchanged by nodes. +With the previous section in mind, it is easy now to check how update messages +work. Node A may rejoin the cluster after some time. It will send heartbeat +packets where it claims it serves hash slots 1 and 2 with configuration epoch +of 3. All the receivers with updated information will instead see that +the same hash slots are associated with node B having a higher configuration +epoch. Because of this they'll send to A an `UPDATE` message with the new +configuration for the slots. A will update its configuration because of the +**rule 2** above. + +How nodes rejoin the cluster +--- -It also requires that there is a node that is either a slave or a master -for a given hash slot and has the updated configuration, because nodes -send their own configuration in pong and pong packets headers. +The same basic mechanism is also used in order for a node to rejoin a cluster +in a proper way. 
Continuing with the example above, node A will be notified +that hash slots 1 and 2 are now served by B. Assuming that these two were +the only hash slots served by A, the count of hash slots served by A will +drop to 0! So A will **reconfigure to be a slave of the new master**. -However sometimes a node may recover after a partition in a setup where -it is the only node serving a given hash slot, but with an old configuration. +The actual rule followed is a bit more complex than this. In general it may +happen that A rejoins after a lot of time, in the meantime it may happen that +hash slots originally served by A are served by multiple nodes, for example +hash slot 1 may be served by B, and hash slot 2 by C. -Example: a given hash slot is served by node A and B. A is the master, and at some point fails, so B is promoted as master. Later B fails as well, and the cluster has no way to recover since there are no more replicas for this hash slot. +So the actual *Redis Cluster node role switch rule* is: **A master node will change its configuration to replicate (be a slave of) the node that stole its last hash slot**. -However A may recover some time later, and rejoin the cluster with an old configuration in which it was writable as a master. There is no replica that can update its configuration. This is the goal of UPDATE messages: when a node detects that another node is advertising its hash slots with an old configuration, it sends the node an UPDATE message with the ID of the new node serving the slots and the set of hash slots (send as a bitmap) that it is serving. +So during the reconfiguration eventually the number of served hash slots will drop to zero, and the node will reconfigure accordingly. Note that in the base case this just means that the old master will be a slave of the slave that replaced it after a failover. However in the general form the rule covers all the possible cases. 
-NOTE: while currently configuration updates via ping / pong and UPDATE share the -same code path, there is a functional overlap between the two in the way they -update a configuration of a node with stale information. However the two -mechanisms are both useful because ping / pong messages after some time are -able to populate the hash slots routing table of a new node, while UPDATE -messages are only sent when an old configuration is detected, and only -cover the information needed to fix the wrong configuration. +Slaves do exactly the same: they reconfigure to replicate to the node that +stole the last hash slot of its former master. Replica migration --- @@ -957,26 +1100,35 @@ configEpoch conflicts resolution algorithm When new `configEpoch` values are created via slave promotions during failovers, they are guaranteed to be unique. -However during manual reshardings, when a hash slot is migrated from +However there are two distinct events where new configEpoch values are +created in an unsafe way, just incrementing the local `currentEpoch` of +the local node, hoping there are no conflicts at the same time. +Both the events are system-administrator triggered: + +1. `CLUSTER FAILOVER` command with `TAKEOVER` option is able to manually promote a slave node into a master *without the majority of masters being available*. This is useful, for example, in multi data center setups. +2. Migration of slots for cluster rebalancing also generates new configuration epochs inside the local node without agreement for performance reasons. + +Specifically, during manual reshardings, when a hash slot is migrated from a node A to a node B, the resharding program will force B to upgrade its configuration to an epoch which is the greatest found in the cluster, plus 1 (unless the node is already the one with the greatest configuration epoch), without to require for an agreement from other nodes. -This is needed so that the new slot configuration will win over the old one. 
- -This process happens when the system administrator performs a manual -resharding, however it is possible that when the slot is closed after -a resharding and the node assigns itself a new configuration epoch, -at the same time a failure happens, just before the new `configEpoch` is -propagated to the cluster. A slave may start a failover and obtain -the authorization. - -This scenario may lead to two nodes having the same `configEpoch`. There -are other scenarios as well ending with two nodes having the same `configEpoch`: - -* New cluster creation: all nodes start with the same `configEpoch` of 0. -* Possible software bugs. -* Manual editing of the configurations, filesystem corruptions. +Usually a real world resharding involves moving several hundred hash slots, +especially in small clusters, so to require an agreement to generate new +configuration epochs during reshardings, for each hash slot moved, is +inefficient. Moreover it requires an fsync every time in all the cluster nodes +in order to store the new configuration. Because of the way it is performed +instead, we need a new config epoch only when the first hash slot is moved +usually, making it much more efficient in production environments. + +However because of the two cases above, it is possible, while unlikely, to end +with multiple nodes having the same configuration epoch (think for example +a resharding operation performed by the system administrator, and a failover +happening at the same time, plus a lot of bad luck so that the `currentEpoch` +is not propagated fast enough to avoid a collision). + +Moreover software bugs and filesystem corruptions are other causes that may +lead to multiple nodes to have the same configuration epoch. When masters serving different hash slots have the same `configEpoch`, there are no issues, and we are more interested in making sure slaves @@ -984,22 +1136,24 @@ failing over a master have a different and unique configuration epoch. 
However manual interventions or more reshardings may change the cluster configuration in different ways. The Redis Cluster main liveness property -is that the slot configuration always converges, so we really want under every +is that the slot configurations always converge, so we really want under every condition that all the master nodes have a different `configEpoch`. -In order to enforce this, a conflicts resolution is used in the event -that two nodes end with the same `configEpoch`. +In order to enforce this, **a conflicts resolution algorithm** is used in the +event that two nodes end with the same `configEpoch`. * IF a master node detects another master node is advertising itself with the same `configEpoch`. * AND IF the node has a lexicographically smaller Node ID compared to the other node claiming the same `configEpoch`. * THEN it increments its `currentEpoch` by 1, and uses it as the new `configEpoch`. -If there are any set of nodes with the same `configEpoch`, all the nodes but the one with the greatest Node ID will move forward, guaranteeing that every node -will pick a unique configEpoch regardless of what happened. +If there are any set of nodes with the same `configEpoch`, all the nodes but the one with the greatest Node ID will move forward, guaranteeing that, eventually, every node will pick a unique configEpoch regardless of what happened. -This mechanism also guarantees that after a fresh cluster is created all -nodes start with a different `configEpoch`. +This mechanism also guarantees that after a fresh cluster is created, all +nodes start with a different `configEpoch`, even if this is not actually +used since `redis-trib` makes sure to use `CLUSTER SET-CONFIG-EPOCH` at startup. +However if for some reason a node is left misconfigured, it will update +its configuration to a different configuration epoch automatically. 
Nodes reset --- @@ -1048,6 +1202,8 @@ The command does two things: The second operation is needed because Redis Cluster uses gossip in order to auto-discover nodes, so removing the node X from node A, could result into node B to gossip node X to A again. Because of the 60 seconds ban, the Redis Cluster administration tools have 60 seconds in order to remove the node from all the nodes, preventing the re-addition of the node because of auto discovery. +Further information is available in the `CLUSTER FORGET` documentation. + Publish/Subscribe === @@ -1059,6 +1215,9 @@ The current implementation will simply broadcast all the publish messages to all the other nodes, but at some point this will be optimized either using bloom filters or other algorithms. +Appendix +=== + Appendix A: CRC16 reference implementation in ANSI C --- From 6c12cbd2f912a80e20928418e0a5b448afbaa80a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 26 Mar 2015 10:24:42 +0100 Subject: [PATCH 0230/2314] Cluster spec title :-) --- topics/cluster-spec.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index b84a0f8beb..9e1c22796e 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -1,9 +1,12 @@ +Redis Cluster Specification +=== + Welcome to the **Redis Cluster Specification**. Here you'll find information about algorithms and design rationales of Redis Cluster. This document is a work in progress as it is continuously synchronized with the actual implementation of Redis. 
-Overview and rational of the design +Main properties and rationales of the design === Redis Cluster goals From fb8c3631218bad7ef8d33d6db8b124af787e88d2 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 27 Mar 2015 12:44:32 +0100 Subject: [PATCH 0231/2314] Typos and small rewordings --- topics/cluster-spec.md | 74 +++++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 9e1c22796e..e197fda7a8 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -112,7 +112,7 @@ In Redis Cluster nodes don't proxy commands to the right node in charge for a gi Eventually clients obtain an up to date representation of the cluster and which node serves which subset of keys, so during normal operations clients directly contact the right nodes in order to send a given command. -Because of the use of asynchronous replication, nodes does not wait for other nodes acknowledgment of writes (if not explicitly requested using the `WAIT` command). +Because of the use of asynchronous replication, nodes do not wait for other nodes acknowledgment of writes (if not explicitly requested using the `WAIT` command). Also, because multiple keys commands are only limited to *near* keys, data is never moved between nodes if not in case of resharding. @@ -125,7 +125,7 @@ Redis Cluster. Why merge operations are avoided --- -Redis Cluster design avoids conflicting versions of the same key-value pair in multiple nodes since in the case of the Redis data model this is not always desirable: values in Redis are often very large, it is common to see lists or sorted sets with millions of elements. Also data types are semantically complex. Transferring and merging these kind of values can be a major bottleneck and/or may require a non trivial involvement of application-side logic, additional memory to store meta-data, and so forth. 
+Redis Cluster design avoids conflicting versions of the same key-value pair in multiple nodes as in the case of the Redis data model this is not always desirable: values in Redis are often very large, it is common to see lists or sorted sets with millions of elements. Also data types are semantically complex. Transferring and merging these kind of values can be a major bottleneck and/or may require a non trivial involvement of application-side logic, additional memory to store meta-data, and so forth. There are no strict technological limits here, CRDTs or synchronously replicated state machines can model complex data types similar to Redis, however the @@ -144,9 +144,9 @@ for the cluster size of 16384 master nodes (however the suggested max size of nodes is in the order of ~ 1000 nodes). Each master nodes in a cluster handles a subset of the 16384 hash slots. -When the cluster is **stable**, that means that there is no a cluster +When the cluster is **stable**, that means there is no cluster reconfiguration in progress (where hash slots are moved from one node -to another) a single hash slot will be served exactly by a single node +to another), a single hash slot will be served exactly by a single node (however the serving node can have one or more slaves that will replace it in the case of net splits or failures, and that can be used in order to scale read operations where reading stale data is acceptable). @@ -202,7 +202,7 @@ Examples: * For the key `foo{}{bar}` the whole key will be hashed as usually since the first occurrence of `{` is followed by `}` on the right without characters in the middle. * For the key `foo{{bar}}zap` the substring `{bar` will be hashed, because it is the substring between the first occurrence of `{` and the first occurrence of `}` on its right. * For the key `foo{bar}{zap}` the substring `bar` will be hashed, since the algorithm stops at the first valid or invalid (without bytes inside) match of `{` and `}`. 
-* What follows from the algorithm is that if the key starts with `{}`, it is guaranteed to be hashes as a whole. This is useful when using binary data as key names. +* What follows from the algorithm is that if the key starts with `{}`, it is guaranteed to be hashed as a whole. This is useful when using binary data as key names. Adding the hash tags exception, the following is an implementation of the `HASH_SLOT` function in Ruby and C language. @@ -251,7 +251,7 @@ hex representation of a 160 bit random number, obtained the first time a node is started (usually using /dev/urandom). The node will save its ID in the node configuration file, and will use the same ID forever, or at least as long as the node configuration file is not -deleted by the system administrator, or an *hard reset* is requested +deleted by the system administrator, or a *hard reset* is requested via the `CLUSTER RESET` command. The node ID is used to identify every node across the whole cluster. @@ -276,8 +276,8 @@ and finally the set of hash slots served. A detailed [explanation of all the node fields](http://redis.io/commands/cluster-nodes) is described in the `CLUSTER NODES` documentation. -The `CLUSTER NODES` command, that can be sent to each the nodes in the cluster, provides as output the state of the cluster and the informations for each node -according to the local view the queries node has of the cluster. +The `CLUSTER NODES` command, that can be sent to each of the nodes in the cluster, provides as output the state of the cluster and the information for each node +according to the local view the queried node has of the cluster. The following is an example of output of `CLUSTER NODES` sent to a master node in a small cluster of three nodes. @@ -316,7 +316,7 @@ Redis cluster is a full mesh where every node is connected with every other node In a cluster of N nodes, every node has N-1 outgoing TCP connections, and N-1 incoming connections. 
These TCP connections are kept alive all the time and are not created on demand. -When a node expects a pong reply in response to a ping in the cluster bus, before to wait for enough time to mark the node as unreachable, it will try to +When a node expects a pong reply in response to a ping in the cluster bus, before waiting long enough to mark the node as unreachable, it will try to refresh the connection with the node by reconnecting from scratch. While Redis Cluster nodes form a full mesh, **nodes use a gossip protocol and @@ -386,7 +386,7 @@ An alternative is to just refresh the whole client-side cluster layout when a MOVED redirection is received, using the `CLUSTER NODES` or `CLUSTER SLOTS` commands, since when a redirection is encountered, likely multiple slots were reconfigured, not just one, so to update the configuration -ASAP for the client is often the best strategy. +as soon as possible for the client is often the best strategy. Note that when the Cluster is stable (no ongoing changes in the configuration), eventually all the clients will obtain a map of hash slots -> nodes, making @@ -402,7 +402,7 @@ Cluster live reconfiguration Redis cluster supports the ability to add and remove nodes while the cluster is running. Actually adding or removing a node is abstracted into the same operation, that is, moving a hash slot from a node to another. This means -that the same basic mechanism can use in order to rebalance the cluster, add +that the same basic mechanism can be used in order to rebalance the cluster, add or remove nodes, and so forth. * To add a new node to the cluster an empty node is added to the cluster and some hash slot is moved from existing nodes to the new node. @@ -428,12 +428,12 @@ The following subcommands are available (among others not useful in this case): The first two commands, `ADDSLOTS` and `DELSLOTS`, are simply used to assign (or remove) slots to a Redis node. 
Assigning a slot means to tell a given -master node, that it will be in charge of storing and serving content for +master node that it will be in charge of storing and serving content for the specified hash slot. After the hash slots are assigned they will propagate across all the cluster using the gossip protocol, as specified later in the -*confiugration propagation* section. +*configuration propagation* section. The `ADDSLOTS` command is usually used when a new cluster is created from scratch to assign each master node a subset of all the 16384 hash @@ -508,8 +508,8 @@ propagation of the new configuration across the cluster. ASK redirection --- -In the previous section we briefly talked about ASK redirection, why we -can't simply use the MOVED redirection? Because while MOVED means that +In the previous section we briefly talked about ASK redirection, why can't +we simply use the MOVED redirection? Because while MOVED means that we think the hash slot is permanently served by a different node and the next queries should be tried against the specified node, ASK means to only ask the next query to the specified node. @@ -524,7 +524,7 @@ that clients will only try node B after A was tried, node B will only accept queries of a slot that is set as IMPORTING if the client sends the ASKING command before sending the query. -Basically the ASKING command set a one-time flag on the client that forces +Basically the ASKING command sets a one-time flag on the client that forces a node to serve a query about an IMPORTING slot. So the full semantics of the ASK redirection is the following, from the @@ -548,8 +548,8 @@ Clients first connection and handling of redirections. 
--- While it is possible to have a Redis Cluster client implementation that does not -remembers the slots configuration (the map between slot numbers and addresses of -nodes serving it) in memory, and only works contacting random nodes waiting to +remember the slots configuration (the map between slot numbers and addresses of +nodes serving it) in memory and only works contacting random nodes waiting to be redirected, such a client would be very inefficient. Redis Cluster clients should try to be smart enough to memorize the slots @@ -617,7 +617,7 @@ slots configuration map filling the target nodes with NULL objects, and report an error if the user will try to execute commands about keys that belong to unassigned slots. -However before to return an error to the caller, when a slot is found to be +However before returning an error to the caller, when a slot is found to be unassigned, the client should try to fetch the slots configuration again to check if the cluster is now configured properly. @@ -630,7 +630,7 @@ For example the following operation is valid: MSET {user:1000}.name Angela {user:1000}.surname White However multi-key operations may become unavailable when a resharding of the -hash slot the keys belong in progress. +hash slot the keys belong to is in progress. More specifically, even during a resharding, the multi-key operations targeting keys that all exist and are still all in the same node (either @@ -640,7 +640,7 @@ Operations about keys that don't exist or are, during the resharding, split between the source and destination node, will generate a `-TRYAGAIN` error. The client can try the operation after some time, or report back the error. -As soon the migration of the specified hash slot has terminated, all the +As soon as the migration of the specified hash slot has terminated, all the multi key operations are available again for this hash slot. 
Scaling reads using slave nodes @@ -674,7 +674,7 @@ Nodes heartbeat and gossip messages Nodes in the cluster exchange ping / pong packets, generally just called heartbeat packets. -Usually a node will ping a few random nodes every second so that the total number of ping packets send (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. +Usually a node will ping a few random nodes every second so that the total number of ping packets sent (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. However every node makes sure to ping every other node that we don't either sent a ping or received a pong for longer than half the `NODE_TIMEOUT` time. Before `NODE_TIMEOUT` has elapsed, nodes also try to reconnect the TCP link with another node to make sure nodes are not believed to be unreachable only because there is a problem in the current TCP connection. @@ -696,7 +696,7 @@ Ping and pong packets contain a header that is common to all the kind of packets The common header has the following information: * Node ID, that is a 160 bit pseudorandom string that is assigned the first time a node is created and remains the same for all the life of a Redis Cluster node. -* The `currentEpoch` and `configEpoch` field of the sending node, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in details in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master. +* The `currentEpoch` and `configEpoch` field of the sending node, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in detail in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master. * The node flags, indicating if the node is a slave, a master, and other single-bit node information. 
* A bitmap of the hash slots served by the sending node, or if the node is a slave, a bitmap of the slots served by its master. * The sender TCP base port (that is, the port used by Redis to accept client commands, add 10000 to this to obtain the cluster port). @@ -716,7 +716,7 @@ Gossip sections allow receiving nodes to get information about the state of othe Failure detection --- -Redis Cluster failure detection is used to recognize when a master or slave node is no longer reachable by the majority of nodes, and as a result of this event, either promote a slave to the role of master, of when this is not possible, put the cluster in an error state to stop receiving queries from clients. +Redis Cluster failure detection is used to recognize when a master or slave node is no longer reachable by the majority of nodes, and as a result of this event, either promote a slave to the role of master, or when this is not possible, put the cluster in an error state to stop receiving queries from clients. As already mentioned, every node takes a list of flags associated with other known nodes. There are two flags that are used for failure detection that are called `PFAIL` and `FAIL`. `PFAIL` means *Possible failure*, and is a non acknowledged failure type. `FAIL` means that a node is failing and that this condition was confirmed by a majority of masters within a fixed amount of time. @@ -743,15 +743,15 @@ If all the above conditions are true, Node A will: * Mark the node as `FAIL`. * Send a `FAIL` message to all the reachable nodes. -The `FAIL` message will force every receiving node to mark the node in `FAIL` state, whatever or not it already flagged the node in `PFAIL` state. +The `FAIL` message will force every receiving node to mark the node in `FAIL` state, whether or not it already flagged the node in `PFAIL` state. 
Note that *the FAIL flag is mostly one way*, that is, a node can go from `PFAIL` to `FAIL`, but for the `FAIL` flag to be cleared there are only two possibilities: -* The node is already reachable, and it is a slave. In this case the `FAIL` flag can be cleared as slaves are not failed over. -* The node is already reachable, and it is a master not serving any slot. In this case the `FAIL` flag can be cleared as masters without slots do not really participate to the cluster, and are waiting to be configured in order to join the cluster. +* The node is already reachable and it is a slave. In this case the `FAIL` flag can be cleared as slaves are not failed over. +* The node is already reachable and it is a master not serving any slot. In this case the `FAIL` flag can be cleared as masters without slots do not really participate to the cluster, and are waiting to be configured in order to join the cluster. * The node is already reachable, is a master, but a long time (N times the `NODE_TIMEOUT`) has elapsed without any detectable slave promotion. Better for it to rejoin the cluster and continue in this case. -In is useful to note that While the `PFAIL` -> `FAIL` transition uses a form of agreement, the agreement used is weak: +It is useful to note that while the `PFAIL` -> `FAIL` transition uses a form of agreement, the agreement used is weak: 1. Nodes collect views of other nodes during some time, so even if the majority of master nodes need to "agree", actually this is just state that we collected from different nodes at different times and we are not sure, nor we require, that at a given moment the majority of masters agreed. However we discard failure reports which are old, so the failure was signaled by the majority of masters within a window of time. 2. While every node detecting the `FAIL` condition will force that condition on other nodes in the cluster using the `FAIL` message, there is no way to ensure the message will reach all the nodes. 
For instance a node may detect the `FAIL` condition and because of a partition will not be able to reach any other node. @@ -776,11 +776,11 @@ The `currentEpoch` is a 64 bit unsigned number. At node creation every Redis Cluster node, both slaves and master nodes, set the `currentEpoch` to 0. -Every time a packet is received from another node, if the epoch of the sender (part of the cluster bus messages header) is greater than the local node epoch, then `currentEpoch` is updated to the sender epoch. +Every time a packet is received from another node, if the epoch of the sender (part of the cluster bus messages header) is greater than the local node epoch, the `currentEpoch` is updated to the sender epoch. Because of this semantics eventually all the nodes will agree to the greatest `configEpoch` in the cluster. -The way this information is used is when the state of the cluster is changed and a node seeks agreement in order to perform some action. +This information is used when the state of the cluster is changed and a node seeks agreement in order to perform some action. Currently this happens only during slave promotion, as described in the next section. Basically the epoch is a logical clock for the cluster and dictates whatever a given information wins over one with a smaller epoch. @@ -850,9 +850,9 @@ updated replication offset is at rank 0, the second most updated at rank 1, and However if a slave of higher rank fails to be elected, the others will try shortly, so the order is not enforced in a strict way. -Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other exisitng master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. +Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other existing master. 
It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` pakcet is found not upadted by any other node). +In order to speedup the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` packet is found not updated by any other node). The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. How nodes rejoining the cluster are configured is explained in one of the next sections. @@ -875,10 +875,10 @@ Master `currentEpoch` is 5, lastVoteEpoch is 1 (this may happen after a few fail * Slave `currentEpoch` is 3. * Slave tries to be elected with epoch 4 (3+1), master replies with an ok with `currentEpoch` 5, however the reply is delayed. -* Slave wll try to be elected again, at a latter time, with epoch 5 (4+1), the delayed reply reaches to slave with `currentEpoch` 5, and is accepted as valid. +* Slave will try to be elected again, at a later time, with epoch 5 (4+1), the delayed reply reaches the slave with `currentEpoch` 5, and is accepted as valid. 4. Masters don't vote a slave of the same master before `NODE_TIMEOUT * 2` has elapsed since a slave of that master was already voted. 
This is not strictly required as it is not possible that two slaves win the election in the same epoch, but in practical terms it ensures that normally when a slave is elected it has plenty of time to inform the other slaves avoiding that another slave will win a new election, doing a new unwanted failover. -5. Masters don't try to select the best slave in any way, simply if the slave's master is in `FAIL` state and the master did not voted in the current term, the positive vote is granted. However the best slave is the most likely to start the election and win it before the other slaves, since it usually will be able to start the voting process earlier, because if its *higher rank* as explained in the previous section. +5. Masters don't try to select the best slave in any way, simply if the slave's master is in `FAIL` state and the master did not voted in the current term, the positive vote is granted. However the best slave is the most likely to start the election and win it before the other slaves, since it usually will be able to start the voting process earlier, because of its *higher rank* as explained in the previous section. 6. When a master refuses to vote for a given slave there is no negative response, the request is simply ignored. 7. Masters don't grant the vote to slaves sending a `configEpoch` that is less than any `configEpoch` in the master table for the slots claimed by the slave. Remember that the slave sends the `configEpoch` of its master, and the bitmap of the slots served by its master. What this means is basically that the slave requesting the vote must have a configuration, for the slots it wants to failover, that is newer or equal the one of the master granting the vote. @@ -901,7 +901,7 @@ At this point B is down, and A is available again, having a role of master (actu 3. So, all the nodes will upgrade their table to assign the hash slots to C, and the cluster will continue its operations. 
As you'll see in the next sections, actually a stale node rejoining a cluster -will usually get notified ASAP about the configuration change, since as soon +will usually get notified as soon as possible about the configuration change, since as soon as it pings any other node, the receiver will detect it has stale information and will send an `UPDATE` message. @@ -913,10 +913,10 @@ An important part of Redis Cluster is the mechanism used to propagate the inform The same mechanism allows nodes partitioned away for an indefinite amount of time to rejoin the cluster in a sensible way. -The way hash slots configurations are propagate are basically two: +The way hash slots configurations are propagated are basically two: 1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a slave) servers. -2. `UPDATE` messages. Since in every heartbeat packet there are informations about the sender `configEpoch` and set of hash slots served, if a receiver of an heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. +2. `UPDATE` messages. Since in every heartbeat packet there are information about the sender `configEpoch` and set of hash slots served, if a receiver of an heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. The receiver of an heartbeat or `UPDATE` message uses certain simple rules in order to update its table mapping hash slots to nodes. When a new Redis Cluster node is created, its local hash slot table is simple initialized to `NULL` entries, so that each hash slot is not bound, not linked to any node. 
Something like the following: From 6738443e03730470f0f652ed6043bc0acc11ee40 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Mar 2015 12:58:07 +0100 Subject: [PATCH 0232/2314] Sentence about heartbeats reworeded in cluster spec. --- topics/cluster-spec.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 9e1c22796e..31dbbba39f 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -671,8 +671,9 @@ Fault Tolerance Nodes heartbeat and gossip messages --- -Nodes in the cluster exchange ping / pong packets, generally just called -heartbeat packets. +Redis Cluster nodes continuously exchange ping and pong packets. Those two kind of packets have the same structure, and both carry imporant configuration informations. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. + +Usually nodes send ping packets that will trigger the receivers to reply with a pong packets. However this is not necessarely true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration ASAP. Usually a node will ping a few random nodes every second so that the total number of ping packets send (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. 
From 74129f7f151485c4c31e9354b42ceed86a55f0cd Mon Sep 17 00:00:00 2001 From: kenvifire Date: Sat, 28 Mar 2015 09:11:54 +0800 Subject: [PATCH 0233/2314] fix typo --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index a35c351a7f..104ac50e34 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -190,7 +190,7 @@ This amount of time is a very important configuration directive of Redis Cluster, and is called the **node timeout**. After node timeout has elapsed, a master node is considered to be failing, -and can be replaced by one if its replicas. +and can be replaced by one of its replicas. Similarly after node timeout has elapsed without a master node to be able to sense the majority of the other master nodes, it enters an error state and stops accepting writes. From 22a3ba66b340208fd4eb82da4e7371cc31822777 Mon Sep 17 00:00:00 2001 From: kenvifire Date: Sat, 28 Mar 2015 09:59:51 +0800 Subject: [PATCH 0234/2314] fix typo --- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 104ac50e34..32854da311 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -200,7 +200,7 @@ Creating and using a Redis Cluster Note: to deploy a Redis Cluster manually is **very important to learn** certain operation aspects of it. However if you want to get a cluster up and running -ASAP skip this section and the next one and go directly to **Creating a Redis Cluster using the create-custer script**. +ASAP skip this section and the next one and go directly to **Creating a Redis Cluster using the create-cluster script**. To create a cluster, the first thing we need is to have a few empty Redis instances running in **cluster mode**. 
This basically means that @@ -299,7 +299,7 @@ you'll see a message like that: This means that there is at least a master instance serving each of the 16384 slots available. -Creating a Redis Cluster using the create-custer script +Creating a Redis Cluster using the create-cluster script --- If you don't want to create a Redis Cluster by configuring and executing From ed775775bac39da3f68489f02f26ce5a184d029f Mon Sep 17 00:00:00 2001 From: kenvifire Date: Sat, 28 Mar 2015 10:34:39 +0800 Subject: [PATCH 0235/2314] fix grammer error --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 32854da311..48a8f04c18 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -268,7 +268,7 @@ Creating the cluster --- Now that we have a number of instances running, we need to create our -cluster writing some meaningful configuration to the nodes. +cluster by writing some meaningful configuration to the nodes. This is very easy to accomplish as we are helped by the Redis Cluster command line utility called `redis-trib`, that is a Ruby program From a03e31e4523775ef336098c8be8e8c2373f6a6bd Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Sat, 28 Mar 2015 05:16:31 -0400 Subject: [PATCH 0236/2314] Copy edits for typos --- commands/cluster failover.md | 4 ++-- topics/cluster-spec.md | 14 +++++++------- topics/cluster-tutorial.md | 4 ++-- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/commands/cluster failover.md b/commands/cluster failover.md index 85238d3482..6c1af23f54 100644 --- a/commands/cluster failover.md +++ b/commands/cluster failover.md @@ -26,7 +26,7 @@ failover ASAP starting from point 4. This is useful when we want to start a manual failover while the master is no longer reachable. 
However using **FORCE** we still need the majority of masters to be available -in order to authorize the failover and genereate a new configuration epoch +in order to authorize the failover and generate a new configuration epoch for the slave that is going to become master. ## TAKEOVER option: manual failover without cluster consensus @@ -42,7 +42,7 @@ not uses any cluster authorization in order to failover. A slave receiving `CLUSTER FAILOVER TAKEOVER` will instead: 1. Generate a new `configEpoch` unilaterally, just taking the current greatest epoch available and incrementing it if its local configuration epoch is not already the greatest. -2. Assign itself all the hash slots of its master, and propagate the new configuraiton to every node which is reachable ASAP, and eventually to every other node. +2. Assign itself all the hash slots of its master, and propagate the new configuration to every node which is reachable ASAP, and eventually to every other node. Note that **TAKEOVER violates the last-failover-wins principle** of Redis Cluster, since the configuration epoch generated by the slave violates the normal generation of configuration epochs in several ways: diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 31dbbba39f..0adff9b43c 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -276,7 +276,7 @@ and finally the set of hash slots served. A detailed [explanation of all the node fields](http://redis.io/commands/cluster-nodes) is described in the `CLUSTER NODES` documentation. -The `CLUSTER NODES` command, that can be sent to each the nodes in the cluster, provides as output the state of the cluster and the informations for each node +The `CLUSTER NODES` command, that can be sent to each the nodes in the cluster, provides as output the state of the cluster and the information for each node according to the local view the queries node has of the cluster. 
The following is an example of output of `CLUSTER NODES` sent to a master @@ -433,7 +433,7 @@ the specified hash slot. After the hash slots are assigned they will propagate across all the cluster using the gossip protocol, as specified later in the -*confiugration propagation* section. +*configuration propagation* section. The `ADDSLOTS` command is usually used when a new cluster is created from scratch to assign each master node a subset of all the 16384 hash @@ -671,9 +671,9 @@ Fault Tolerance Nodes heartbeat and gossip messages --- -Redis Cluster nodes continuously exchange ping and pong packets. Those two kind of packets have the same structure, and both carry imporant configuration informations. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. +Redis Cluster nodes continuously exchange ping and pong packets. Those two kind of packets have the same structure, and both carry important configuration information. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. -Usually nodes send ping packets that will trigger the receivers to reply with a pong packets. However this is not necessarely true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration ASAP. +Usually nodes send ping packets that will trigger the receivers to reply with a pong packets. However this is not necessarily true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration ASAP. 
Usually a node will ping a few random nodes every second so that the total number of ping packets send (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. @@ -851,9 +851,9 @@ updated replication offset is at rank 0, the second most updated at rank 1, and However if a slave of higher rank fails to be elected, the others will try shortly, so the order is not enforced in a strict way. -Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other exisitng master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. +Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` pakcet is found not upadted by any other node). +In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` packet is found not updated by any other node). The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. 
How nodes rejoining the cluster are configured is explained in one of the next sections. @@ -917,7 +917,7 @@ time to rejoin the cluster in a sensible way. The way hash slots configurations are propagate are basically two: 1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a slave) servers. -2. `UPDATE` messages. Since in every heartbeat packet there are informations about the sender `configEpoch` and set of hash slots served, if a receiver of an heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. +2. `UPDATE` messages. Since in every heartbeat packet there is information about the sender `configEpoch` and set of hash slots served, if a receiver of an heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. The receiver of an heartbeat or `UPDATE` message uses certain simple rules in order to update its table mapping hash slots to nodes. When a new Redis Cluster node is created, its local hash slot table is simple initialized to `NULL` entries, so that each hash slot is not bound, not linked to any node. Something like the following: diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index a35c351a7f..bfa7c726e5 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -8,9 +8,9 @@ going into the details that are covered in the [Redis Cluster specification](/topics/cluster-spec) but just describing how the system behaves from the point of view of the user. -However this tutorial tries to provide informations about the availability +However this tutorial tries to provide information about the availability and consistency characteristics of Redis Cluster from the point of view -of the final user, state in a simple to understand way. 
+of the final user, stated in a simple to understand way. Note this tutorial requires Redis version 3.0 or higher. From af6733929317f3901ae2447a523dbdced81dd75a Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sun, 29 Mar 2015 17:02:39 +0200 Subject: [PATCH 0237/2314] Fix small typo in Sentinel docu Fixes antirez/redis#2480 --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 02d2e81176..12457a215f 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -326,7 +326,7 @@ The Sentinel properties guarantee that Sentinel 1 and 2 now have the new configuration for the master. However Sentinel 3 has still the old configuration since it lives in a different partition. -When know that Sentinel 3 will get its configuration updated when the network +We know that Sentinel 3 will get its configuration updated when the network partition will heal, however what happens during the partition if there are clients partitioned with the old master? From f06d9df51d992a4c27883b5c047a9a84996801a0 Mon Sep 17 00:00:00 2001 From: prascuna Date: Tue, 31 Mar 2015 10:28:03 +0100 Subject: [PATCH 0238/2314] Update partitioning.md --- topics/partitioning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index 87bb942636..802623b8d9 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -92,7 +92,7 @@ Redis Cluster Redis Cluster is the preferred way to get automatic sharding and high availability. It is currently not production ready, but finally entered beta stage, so we recommend you to start experimenting with it. You can get more information about Redis Cluster in the [Cluster tutorial](/topics/cluster-tutorial). -Once Redis Cluster will be available, and if a Redis Cluster complaint client is available for your language, Redis Cluster will be the de facto standard for Redis partitioning. 
+Once Redis Cluster will be available, and if a Redis Cluster compliant client is available for your language, Redis Cluster will be the de facto standard for Redis partitioning. Redis Cluster is a mix between *query routing* and *client side partitioning*. From 66c4140f52b650a55cc271bc03e44c8b58648078 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 1 Apr 2015 16:17:43 +0200 Subject: [PATCH 0239/2314] More rewordings --- topics/cluster-spec.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index e197fda7a8..2026d5cffb 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -913,7 +913,7 @@ An important part of Redis Cluster is the mechanism used to propagate the inform The same mechanism allows nodes partitioned away for an indefinite amount of time to rejoin the cluster in a sensible way. -The way hash slots configurations are propagated are basically two: +There are two ways hash slot configurations are propagated: 1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a slave) servers. 2. `UPDATE` messages. Since in every heartbeat packet there are information about the sender `configEpoch` and set of hash slots served, if a receiver of an heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. @@ -982,7 +982,7 @@ sure the information will be updated in the cluster. UPDATE messages, a closer look --- -With the previous section in mind, it is easy know to check how update messages +With the previous section in mind, it is easy now to check how update messages work. Node A may rejoin the cluster after some time. It will send heartbeat packets where it claims it serves hash slots 1 and 2 with configuration epoch of 3. 
All the receivers with an updated information will instead see that @@ -1010,7 +1010,7 @@ So the actual *Redis Cluster node role switch rule* is: **A master node will cha So during the reconfiguration eventually the number of served hash slots will drop to zero, and the node will reconfigure accordingly. Note that in the base case this just means that the old master will be a slave of the slave that replaced it after a failover. However in the general form the rule covers all the possible cases. Slaves do exactly the same: they reconfigure to replicate to the node that -stolen the last hash slot of its former master. +stole the last hash slot of its former master. Replica migration --- From efdd7395a76af14787bbff11e305be2921cbdeb5 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 1 Apr 2015 16:24:38 +0200 Subject: [PATCH 0240/2314] expand ASAP --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index ddbb0f7e1d..37539ff3f0 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -673,7 +673,7 @@ Nodes heartbeat and gossip messages Redis Cluster nodes continuously exchange ping and pong packets. Those two kind of packets have the same structure, and both carry important configuration information. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. -Usually nodes send ping packets that will trigger the receivers to reply with a pong packets. However this is not necessarily true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration ASAP. +Usually nodes send ping packets that will trigger the receivers to reply with a pong packets. However this is not necessarily true. 
It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration as soon as possible. Usually a node will ping a few random nodes every second so that the total number of ping packets sent (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. From f7fe3d01ac8f570906f0bac0e6f74735331c9a18 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 1 Apr 2015 16:29:55 +0200 Subject: [PATCH 0241/2314] Rephrase sentence to make sense again --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 37539ff3f0..3f71d2d7d3 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -853,7 +853,7 @@ shortly, so the order is not enforced in a strict way. Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` packet is found not updated by any other node). +In order to speedup the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` packet from another node, if the information it publishes via heartbeat packets are detected to be out of date). 
The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. How nodes rejoining the cluster are configured is explained in one of the next sections. From 1a5a765cc90fc3037f6506a8b9ff8f2de5e41ff0 Mon Sep 17 00:00:00 2001 From: Baptiste Fontaine Date: Thu, 2 Apr 2015 10:09:55 +0200 Subject: [PATCH 0242/2314] Typo fixed in PFCOUNT doc --- commands/pfcount.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/pfcount.md b/commands/pfcount.md index 47634e2135..019f51b12f 100644 --- a/commands/pfcount.md +++ b/commands/pfcount.md @@ -52,7 +52,7 @@ Redis HyperLogLogs are represented using a double representation: the *sparse* r The sparse representation uses a run-length encoding optimized to store efficiently a big number of registers set to zero. The dense representation is a Redis string of 12288 bytes in order to store 16384 6-bit counters. The need for the double representation comes from the fact that using 12k (which is the dense representation memory requirement) to encode just a few registers for smaller cardinalities is extremely suboptimal. -Both representations are prefixed with a 16 bytes header, that includes a magic, an encoding / version fiend, and the cached cardinality estimation computed, stored in little endian format (the most significant bit is 1 if the estimation is invalid since the HyperLogLog was updated since the cardinality was computed). 
+Both representations are prefixed with a 16 bytes header, that includes a magic, an encoding / version field, and the cached cardinality estimation computed, stored in little endian format (the most significant bit is 1 if the estimation is invalid since the HyperLogLog was updated since the cardinality was computed). The HyperLogLog, being a Redis string, can be retrieved with `GET` and restored with `SET`. Calling `PFADD`, `PFCOUNT` or `PFMERGE` commands with a corrupted HyperLogLog is never a problem, it may return random values but does not affect the stability of the server. Most of the times when corrupting a sparse representation, the server recognizes the corruption and returns an error. From 14d2b0d4921fc588f0fcf7949668092f8b202668 Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 12:58:53 +0200 Subject: [PATCH 0243/2314] Fix typo in cluster-spec (SETSLOT) --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 3f71d2d7d3..f13430333c 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -502,7 +502,7 @@ there are latency constraints in the application using the database. When finally the migration process is finished, the `SETSLOT NODE ` command is send to the two nodes involved in the migration in order to set the slots in normal state again. Moreover the same command is usually -send to all the other instances in order to don't want for the natural +send to all the other instances in order to not wait for the natural propagation of the new configuration across the cluster. 
ASK redirection From a9619882c9f7b1790a7593da1b5430876b907cec Mon Sep 17 00:00:00 2001 From: tanguylebarzic Date: Thu, 2 Apr 2015 13:02:35 +0200 Subject: [PATCH 0244/2314] Fix typo in cluster-spec (SETSLOT) More idiomatic version --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index f13430333c..beaa0144a9 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -502,7 +502,7 @@ there are latency constraints in the application using the database. When finally the migration process is finished, the `SETSLOT NODE ` command is send to the two nodes involved in the migration in order to set the slots in normal state again. Moreover the same command is usually -send to all the other instances in order to not wait for the natural +send to all the other instances in order not to wait for the natural propagation of the new configuration across the cluster. ASK redirection From 28fd1730d8818d09a3f3a54832cc3cd8a29b3fa0 Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:28:32 +0200 Subject: [PATCH 0245/2314] Typo - client slots configuration --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index beaa0144a9..e29ed4ba33 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -549,7 +549,7 @@ Clients first connection and handling of redirections. While it is possible to have a Redis Cluster client implementation that does not remember the slots configuration (the map between slot numbers and addresses of -nodes serving it) in memory and only works contacting random nodes waiting to +nodes serving it) in memory and only works by contacting random nodes waiting to be redirected, such a client would be very inefficient. 
Redis Cluster clients should try to be smart enough to memorize the slots From 8420e0a2f9af5841aa582ddf5cab45cff6f11df7 Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:29:23 +0200 Subject: [PATCH 0246/2314] Typo - slots configuration --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index e29ed4ba33..67a9688202 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -553,7 +553,7 @@ nodes serving it) in memory and only works by contacting random nodes waiting to be redirected, such a client would be very inefficient. Redis Cluster clients should try to be smart enough to memorize the slots -configuration. However this configuration does not *require* to be up to date, +configuration. However this configuration is not *required* to be up to date, since contacting the wrong node will simply result in a redirection, that will trigger an update of the client view. From 048526ff97c0a9b70b47fd2429246f4097fe15ea Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:30:03 +0200 Subject: [PATCH 0247/2314] Typo - in/at different moments --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 67a9688202..43bea1b1ff 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -558,7 +558,7 @@ since contacting the wrong node will simply result in a redirection, that will trigger an update of the client view. Clients usually need to fetch a complete list of slots and mapped node -addresses in two different moments: +addresses at two different moments: * At startup in order to populate the initial slots configuration. * When a `MOVED` redirection is received. 
From 5ba6059d400d00a355b1e749275382ef89dbd80c Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:30:21 +0200 Subject: [PATCH 0248/2314] Typo - multiple nodes --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 43bea1b1ff..779d0552ea 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -637,7 +637,7 @@ targeting keys that all exist and are still all in the same node (either the source or destination node) are still available. Operations about keys that don't exist or are, during the resharding, split -between the source and destination node, will generate a `-TRYAGAIN` error. +between the source and destination nodes, will generate a `-TRYAGAIN` error. The client can try the operation after some time, or report back the error. As soon as the migration of the specified hash slot has terminated, all the From e4ae733510e7626dc2f33a1fc255c66e582d79c0 Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:30:40 +0200 Subject: [PATCH 0249/2314] Typo - pong packets --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 779d0552ea..2db35ab511 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -673,7 +673,7 @@ Nodes heartbeat and gossip messages Redis Cluster nodes continuously exchange ping and pong packets. Those two kind of packets have the same structure, and both carry important configuration information. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. -Usually nodes send ping packets that will trigger the receivers to reply with a pong packets. However this is not necessarily true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. 
This is useful, for example, in order to broadcast a new configuration as soon as possible. +Usually nodes send ping packets that will trigger the receivers to reply with pong packets. However this is not necessarily true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration as soon as possible. Usually a node will ping a few random nodes every second so that the total number of ping packets sent (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. From 37e2c4d4668d8991aca3de422f348507028d6ef8 Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:31:35 +0200 Subject: [PATCH 0250/2314] Based on the rest of the sentence, the actual meaning is that the number of messages can be large. Picking sizable instead of sensible, which carries the opposite meaning. --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 2db35ab511..3e27a6d212 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -679,7 +679,7 @@ Usually a node will ping a few random nodes every second so that the total numbe However every node makes sure to ping every other node that we don't either sent a ping or received a pong for longer than half the `NODE_TIMEOUT` time. Before `NODE_TIMEOUT` has elapsed, nodes also try to reconnect the TCP link with another node to make sure nodes are not believed to be unreachable only because there is a problem in the current TCP connection. -The amount of messages globally exchanged can be sensible if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which we don't have fresh information for half the `NODE_TIMEOUT` time. 
+The amount of messages globally exchanged can be sizable if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which we don't have fresh information for half the `NODE_TIMEOUT` time. For example in a 100 nodes cluster with a node timeout set to 60 seconds, every node will try to send 99 pings every 30 seconds, with a total amount of pings of 3.3 per second, that multiplied for 100 nodes is 330 pings per second in the total cluster. From ffe77feac988b2ff14cec9e757844d6c48c7ad66 Mon Sep 17 00:00:00 2001 From: Tanguy Le Barzic Date: Thu, 2 Apr 2015 13:33:19 +0200 Subject: [PATCH 0251/2314] Typo - multiple fields --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 3e27a6d212..a15be1bab6 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -697,7 +697,7 @@ Ping and pong packets contain a header that is common to all the kind of packets The common header has the following information: * Node ID, that is a 160 bit pseudorandom string that is assigned the first time a node is created and remains the same for all the life of a Redis Cluster node. -* The `currentEpoch` and `configEpoch` field of the sending node, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in detail in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master. +* The `currentEpoch` and `configEpoch` fields of the sending node, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in detail in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master. * The node flags, indicating if the node is a slave, a master, and other single-bit node information. 
* A bitmap of the hash slots served by the sending node, or if the node is a slave, a bitmap of the slots served by its master. * The sender TCP base port (that is, the port used by Redis to accept client commands, add 10000 to this to obtain the cluster port). From 33ea03a0e4cc612becc7ae31bbc7a17aa1ae4452 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 3 Apr 2015 09:41:43 +0200 Subject: [PATCH 0252/2314] Cluster tutorial: document cluster config options. --- topics/cluster-tutorial.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index b3f742ab7d..6ffaba8502 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -195,6 +195,21 @@ Similarly after node timeout has elapsed without a master node to be able to sense the majority of the other master nodes, it enters an error state and stops accepting writes. +Redis Cluster configuration parameters +=== + +We are about to create an example cluster deployment. Before to continue +let's introduce the configuration parameters that Redis Cluster introduces +in the `redis.conf` file. Some will be obvious, others will be more clear +as you continue reading. + +* **cluster-enabled **: If yes enables Redis Cluster support in a specific Redis instance. Otherwise the instance starts as a stand alone instance as usually. +* **cluster-config-file **: Note that despite the name of this option, this is not an user editable configuration file, but the file where a Redis Cluster node automatically persists the cluster configuration (the state, basically) every time there is a change, in order to be able to re-read it at startup. The file lists things like the other nodes in the cluster, their state, persistent variables, and so forth. Often this file is rewritten and flushed on disk as a result of some message reception. 
+* **cluster-node-timeout **: The maximum amount of time a Redis Cluster node can be unavailable, without it being considered as failing. If a master node is not reachable for more than the specified amount of time, it will be failed over by its slaves. This parameter controls other important things in Redis Cluster. Notably, every node that can't reach the majority of master nodes for the specified amount of time, will stop accepting queries. +* **cluster-slave-validity-factor **: If set to zero, a slave will always try to failover a master, regardless of the amount of time the link between the master and the slave remained disconnected. If the value is positive, a maximum disconnection time is calculated as the *node timeout* value multiplied by the factor provided with this option, and if the node is a slave, it will not try to start a failover if the master link was disconnected for more than the specified amount of time. For example if the node timeout is set to 5 seconds, and the validity factor is set to 10, a slave disconnected from the master for more than 50 seconds will not try to failover its master. Note that any value different than zero may result in Redis Cluster to be not available after a master failure if there is no slave able to failover it. In that case the cluster will return back available only when the original master rejoins the cluster. +* **cluster-migration-barrier **: Minimum number of slaves a master will remain connected with, for another slave to migrate to a master which is no longer covered by any slave. See the appropriate section about replica migration in this tutorial for more information. +* **cluster-require-full-coverage **: If this is set to yes, as it is by default, the cluster stops accepting writes if some percentage of the key space is not covered by any node. If the option is set to no, the cluster will still serve queries even if only requests about a subset of keys can be processed. 
+ Creating and using a Redis Cluster === From 00d67b6cf2c5b9d6d045432ebf17a507193a4d1c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 3 Apr 2015 09:42:55 +0200 Subject: [PATCH 0253/2314] Markdown fixes. --- topics/cluster-tutorial.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 6ffaba8502..782ed0afc5 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -203,12 +203,12 @@ let's introduce the configuration parameters that Redis Cluster introduces in the `redis.conf` file. Some will be obvious, others will be more clear as you continue reading. -* **cluster-enabled **: If yes enables Redis Cluster support in a specific Redis instance. Otherwise the instance starts as a stand alone instance as usually. -* **cluster-config-file **: Note that despite the name of this option, this is not an user editable configuration file, but the file where a Redis Cluster node automatically persists the cluster configuration (the state, basically) every time there is a change, in order to be able to re-read it at startup. The file lists things like the other nodes in the cluster, their state, persistent variables, and so forth. Often this file is rewritten and flushed on disk as a result of some message reception. -* **cluster-node-timeout **: The maximum amount of time a Redis Cluster node can be unavailable, without it being considered as failing. If a master node is not reachable for more than the specified amount of time, it will be failed over by its slaves. This parameter controls other important things in Redis Cluster. Notably, every node that can't reach the majority of master nodes for the specified amount of time, will stop accepting queries. -* **cluster-slave-validity-factor **: If set to zero, a slave will always try to failover a master, regardless of the amount of time the link between the master and the slave remained disconnected. 
If the value is positive, a maximum disconnection time is calculated as the *node timeout* value multiplied by the factor provided with this option, and if the node is a slave, it will not try to start a failover if the master link was disconnected for more than the specified amount of time. For example if the node timeout is set to 5 seconds, and the validity factor is set to 10, a slave disconnected from the master for more than 50 seconds will not try to failover its master. Note that any value different than zero may result in Redis Cluster to be not available after a master failure if there is no slave able to failover it. In that case the cluster will return back available only when the original master rejoins the cluster. -* **cluster-migration-barrier **: Minimum number of slaves a master will remain connected with, for another slave to migrate to a master which is no longer covered by any slave. See the appropriate section about replica migration in this tutorial for more information. -* **cluster-require-full-coverage **: If this is set to yes, as it is by default, the cluster stops accepting writes if some percentage of the key space is not covered by any node. If the option is set to no, the cluster will still serve queries even if only requests about a subset of keys can be processed. +* **cluster-enabled ``**: If yes enables Redis Cluster support in a specific Redis instance. Otherwise the instance starts as a stand alone instance as usually. +* **cluster-config-file ``**: Note that despite the name of this option, this is not an user editable configuration file, but the file where a Redis Cluster node automatically persists the cluster configuration (the state, basically) every time there is a change, in order to be able to re-read it at startup. The file lists things like the other nodes in the cluster, their state, persistent variables, and so forth. Often this file is rewritten and flushed on disk as a result of some message reception. 
+* **cluster-node-timeout ``**: The maximum amount of time a Redis Cluster node can be unavailable, without it being considered as failing. If a master node is not reachable for more than the specified amount of time, it will be failed over by its slaves. This parameter controls other important things in Redis Cluster. Notably, every node that can't reach the majority of master nodes for the specified amount of time, will stop accepting queries. +* **cluster-slave-validity-factor ``**: If set to zero, a slave will always try to failover a master, regardless of the amount of time the link between the master and the slave remained disconnected. If the value is positive, a maximum disconnection time is calculated as the *node timeout* value multiplied by the factor provided with this option, and if the node is a slave, it will not try to start a failover if the master link was disconnected for more than the specified amount of time. For example if the node timeout is set to 5 seconds, and the validity factor is set to 10, a slave disconnected from the master for more than 50 seconds will not try to failover its master. Note that any value different than zero may result in Redis Cluster to be not available after a master failure if there is no slave able to failover it. In that case the cluster will return back available only when the original master rejoins the cluster. +* **cluster-migration-barrier ``**: Minimum number of slaves a master will remain connected with, for another slave to migrate to a master which is no longer covered by any slave. See the appropriate section about replica migration in this tutorial for more information. +* **cluster-require-full-coverage ``**: If this is set to yes, as it is by default, the cluster stops accepting writes if some percentage of the key space is not covered by any node. If the option is set to no, the cluster will still serve queries even if only requests about a subset of keys can be processed. 
Creating and using a Redis Cluster === From 27f955ec6d60bab8c5583d7e42d3f95d86bf5acb Mon Sep 17 00:00:00 2001 From: plz Date: Fri, 3 Apr 2015 16:08:44 +0200 Subject: [PATCH 0254/2314] Update faq.md Small grammar correction --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index c3842fdacb..2ec910b181 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -68,7 +68,7 @@ with an error to write commands (but will continue to accept read-only commands), or you can configure it to evict keys when the max memory limit is reached in the case you are using Redis for caching. -We have documentations you plan to use [Redis as an LRU cache](/topics/lru-cache). +We have documentation if you plan to use [Redis as an LRU cache](/topics/lru-cache). ## Background saving is failing with a fork() error under Linux even if I've a lot of free RAM! From d95f6cc3cbfbeac7aea229afca4ce6946dec6f9e Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 4 Apr 2015 17:05:56 +0300 Subject: [PATCH 0255/2314] Minor typo fix --- topics/memory-optimization.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/memory-optimization.md b/topics/memory-optimization.md index b078856db4..ffe5b619c2 100644 --- a/topics/memory-optimization.md +++ b/topics/memory-optimization.md @@ -102,7 +102,7 @@ As you can see every hash will end containing 100 fields, that is an optimal compromise between CPU and memory saved. There is another very important thing to note, with this schema -every hash will have more or +every hash will have more or less 100 fields regardless of the number of objects we cached. This is since our objects will always end with a number, and not a random string. In some way the final number can be considered as a form of implicit pre-sharding. 
@@ -212,7 +212,7 @@ The fragmentation is calculated as the amount of memory currently in use (as the sum of all the allocations performed by Redis) divided by the physical memory actually used (the RSS value). Because the RSS reflects the peak memory, when the (virtually) used memory is low since a lot of keys / values were -freed, but the RSS is high, the ration `mem_used / RSS` will be very high. +freed, but the RSS is high, the ratio `mem_used / RSS` will be very high. If `maxmemory` is not set Redis will keep allocating memory as it finds fit and thus it can (gradually) eat up all your free memory. From 89fe2951d04330348f61cb759e1a89f82155b47e Mon Sep 17 00:00:00 2001 From: Joffrey JAFFEUX Date: Mon, 6 Apr 2015 17:50:30 +0200 Subject: [PATCH 0256/2314] remove action script client --- clients.json | 8 -------- 1 file changed, 8 deletions(-) diff --git a/clients.json b/clients.json index 56cad7583d..aedb4f297b 100644 --- a/clients.json +++ b/clients.json @@ -10,14 +10,6 @@ "active": true }, - { - "name": "as3redis", - "language": "ActionScript", - "repository": "https://github.com/claus/as3redis", - "description": "", - "authors": ["cwahlers"] - }, - { "name": "carmine", "language": "Clojure", From a7a0caec7ded2b4359c5d8b9c875fcc5f09fe064 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Apr 2015 15:10:47 +0200 Subject: [PATCH 0257/2314] Be more specific about the effect of DISCARD. Thanks @ixtli Closes #529 --- commands/discard.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/discard.md b/commands/discard.md index aaac5dae38..d84b50331c 100644 --- a/commands/discard.md +++ b/commands/discard.md @@ -3,7 +3,7 @@ connection state to normal. [tt]: /topics/transactions -If `WATCH` was used, `DISCARD` unwatches all keys. +If `WATCH` was used, `DISCARD` unwatches all keys watched by the connection. 
@return From 2e2397f25f52bb255b1e08026a4c78ff56c04f5b Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Apr 2015 15:14:19 +0200 Subject: [PATCH 0258/2314] Actually migrate to the destination --- commands/cluster setslot.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster setslot.md b/commands/cluster setslot.md index 038bed6288..bce7eb8d07 100644 --- a/commands/cluster setslot.md +++ b/commands/cluster setslot.md @@ -69,7 +69,7 @@ It is important to note that step 3 is the only time when a Redis Cluster node w The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node where we want to migrate the `destination` node. 1. Set the destination node slot to *importing* state using `CLUSTER SETSLOT IMPORTING `. -2. Set the source node slot to *migrating* state using `CLUSTER SETSLOT MIGRATING `. +2. Set the source node slot to *migrating* state using `CLUSTER SETSLOT MIGRATING `. 3. Get keys from the source node with `CLUSTER GETKEYSINSLOT` command and move them into the destination node using the `MIGRATE` command. 4. Use `CLUSTER SETSLOT STABLE` in the source, destination, and all the other nodes. 
From 6abbe7b369effa9e9dcbd718799cc75c23e45d7b Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Apr 2015 15:14:49 +0200 Subject: [PATCH 0259/2314] Assign the slot to one node after successful migrating data --- commands/cluster setslot.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster setslot.md b/commands/cluster setslot.md index bce7eb8d07..55f9783e28 100644 --- a/commands/cluster setslot.md +++ b/commands/cluster setslot.md @@ -71,7 +71,7 @@ The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in ord 1. Set the destination node slot to *importing* state using `CLUSTER SETSLOT IMPORTING `. 2. Set the source node slot to *migrating* state using `CLUSTER SETSLOT MIGRATING `. 3. Get keys from the source node with `CLUSTER GETKEYSINSLOT` command and move them into the destination node using the `MIGRATE` command. -4. Use `CLUSTER SETSLOT STABLE` in the source, destination, and all the other nodes. +4. Use `CLUSTER SETSLOT NODE ` in the source or destination. Notes: From 81e552a23d3396aab40d24a26c9c378a2d539980 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 9 Apr 2015 18:54:42 +0300 Subject: [PATCH 0260/2314] Minor correction ;) --- topics/partitioning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index 802623b8d9..82e48af59f 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -57,7 +57,7 @@ Consistent hashing implementations are often able to switch to other nodes if th The main concept here is the following: * If Redis is used as a cache **scaling up and down** using consistent hashing is easy. -* If Redis is used as a store, **a fixed keys-to-nodes map is used, so the number of nodes must be fixed and cannot vary**. 
Otherwise, a system is needed that is able to rebalance keys between nodes when nodes are added or removed, and currently only Redis Cluster is able to do this, but Redis Cluster is currently in beta, and not yet considered production-ready. +* If Redis is used as a store, **a fixed keys-to-nodes map is used, so the number of nodes must be fixed and cannot vary**. Otherwise, a system is needed that is able to rebalance keys between nodes when nodes are added or removed, and currently only Redis Cluster is able to do this - Redis Cluster is GA and production-ready. Presharding --- From 341be92ed5881ba3c1d3e8ede9198f52d40967ed Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 10 Apr 2015 00:30:31 +0300 Subject: [PATCH 0261/2314] GA expanded Per @badboy_'s feedback --- topics/partitioning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index 82e48af59f..83d6cbf47f 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -57,7 +57,7 @@ Consistent hashing implementations are often able to switch to other nodes if th The main concept here is the following: * If Redis is used as a cache **scaling up and down** using consistent hashing is easy. -* If Redis is used as a store, **a fixed keys-to-nodes map is used, so the number of nodes must be fixed and cannot vary**. Otherwise, a system is needed that is able to rebalance keys between nodes when nodes are added or removed, and currently only Redis Cluster is able to do this - Redis Cluster is GA and production-ready. +* If Redis is used as a store, **a fixed keys-to-nodes map is used, so the number of nodes must be fixed and cannot vary**. 
Otherwise, a system is needed that is able to rebalance keys between nodes when nodes are added or removed, and currently only Redis Cluster is able to do this - Redis Cluster is generally available and production-ready as of [April 1st, 2015](https://groups.google.com/d/msg/redis-db/dO0bFyD_THQ/Uoo2GjIx6qgJ). Presharding --- From 4f76e2a6b31d7423e9e433a2c14a6cee6ec90c4d Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 15 Apr 2015 13:17:04 +0200 Subject: [PATCH 0262/2314] `count` never made it into 3.0 See antirez/redis#2525 --- commands/spop.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/spop.md b/commands/spop.md index 98023611be..10d773049c 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -2,7 +2,7 @@ Removes and returns one or more random elements from the set value store at `key This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. -The `count` argument will be available in 3.0 and is not available in 2.6 or 2.8 +The `count` argument will be available in a later version and is not available in 2.6, 2.8, 3.0 @return From bae20f4f3cb290c1be4e9234c8251a6b095e2fbe Mon Sep 17 00:00:00 2001 From: luin Date: Fri, 24 Apr 2015 11:56:53 +0800 Subject: [PATCH 0263/2314] Add ioredis as a Node.js client --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index aedb4f297b..60e6168244 100644 --- a/clients.json +++ b/clients.json @@ -709,6 +709,16 @@ "authors": ["fictorial"] }, + { + "name": "ioredis", + "language": "Node.js", + "repository": "https://github.com/luin/ioredis", + "description": "A delightful, performance-focused and full-featured Redis client. 
Supports Cluster, Sentinel, Pipelining and Lua Scripting", + "authors": ["luin"], + "recommended": true, + "active": true + }, + { "name": "iodis", "language": "Io", From 160f604fe4c50cb7d465d00a87a6c92488bc9e64 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 24 Apr 2015 17:05:13 +0200 Subject: [PATCH 0264/2314] Fix ioredis client author Twitter handle. --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 60e6168244..42e8c275a4 100644 --- a/clients.json +++ b/clients.json @@ -714,7 +714,7 @@ "language": "Node.js", "repository": "https://github.com/luin/ioredis", "description": "A delightful, performance-focused and full-featured Redis client. Supports Cluster, Sentinel, Pipelining and Lua Scripting", - "authors": ["luin"], + "authors": ["luinlee"], "recommended": true, "active": true }, From 2ea480daa90948933439ca2e171a3cab479edebd Mon Sep 17 00:00:00 2001 From: Milo Price Date: Fri, 24 Apr 2015 09:04:19 -0700 Subject: [PATCH 0265/2314] Typo fix Note that BITOPS could also plausibly be a misspelling of BITOP, except that BITOP can't operate on ranges. --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 67de359aba..968b3a0d3a 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -943,7 +943,7 @@ There are three commands operating on group of bits: 2. `BITCOUNT` performs population counting, reporting the number of bits set to 1. 3. `BITPOS` finds the first bit having the specified value of 0 or 1. -Both `BITOPS` and `BITCOUNT` are able to operate with byte ranges of the +Both `BITPOS` and `BITCOUNT` are able to operate with byte ranges of the string, instead of running for the whole length of the string. 
The following is a trivial example of `BITCOUNT` call: From c0d824cec503c4070f7dc2299c0176f43370d87d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EC=B5=9C=EC=9E=AC=EC=9B=90?= Date: Sat, 25 Apr 2015 23:54:17 +0900 Subject: [PATCH 0266/2314] Typo - Replica migration --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index a15be1bab6..ab569141b5 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -1029,7 +1029,7 @@ the independent failures of single nodes caused by hardware or software issues that can accumulate over time. For example: * Master A has a single slave A1. -* Master A fails. A1 is promoted as new slave. +* Master A fails. A1 is promoted as new master. * Three hours later A1 fails in an independent manner (not related to the failure of A). No other slave is available for promotion since also node A is still down. The cluster cannot continue normal operations. If the map between masters and slaves is fixed, the only way to make the cluster From 7eb07ad6ea0744bc3d2aad4d96abdf5b15f94bd2 Mon Sep 17 00:00:00 2001 From: Alexandr Emelin Date: Sun, 26 Apr 2015 20:09:20 +0300 Subject: [PATCH 0267/2314] fix typo in how Redis expires keys section --- commands/expire.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/expire.md b/commands/expire.md index e580ec0d58..250692b743 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -137,7 +137,7 @@ Specifically this is what Redis does 10 times per second: 1. Test 20 random keys from the set of keys with an associated expire. 2. Delete all the keys found expired. -3. If more than 25 keys were expired, start again from step 1. +3. If more than 25% of keys were expired, start again from step 1. 
This is a trivial probabilistic algorithm, basically the assumption is that our sample is representative of the whole key space, and we continue to expire until From ac5807afe702b3acfb040dba1cef07f93bf3ed7c Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 30 Apr 2015 10:38:00 +0200 Subject: [PATCH 0268/2314] Add info about stable-released cluster --- topics/partitioning.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index 83d6cbf47f..4b7c408e42 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -90,7 +90,9 @@ So far we covered Redis partitioning in theory, but what about practice? What sy Redis Cluster --- -Redis Cluster is the preferred way to get automatic sharding and high availability. It is currently not production ready, but finally entered beta stage, so we recommend you to start experimenting with it. You can get more information about Redis Cluster in the [Cluster tutorial](/topics/cluster-tutorial). +Redis Cluster is the preferred way to get automatic sharding and high availability. +It is generally available and production-ready as of [April 1st, 2015](https://groups.google.com/d/msg/redis-db/dO0bFyD_THQ/Uoo2GjIx6qgJ). +You can get more information about Redis Cluster in the [Cluster tutorial](/topics/cluster-tutorial). Once Redis Cluster will be available, and if a Redis Cluster compliant client is available for your language, Redis Cluster will be the de facto standard for Redis partitioning. From a51dbcc8ae05b6a3e44c23cefc51e43b6c7e97f6 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 3 May 2015 10:42:03 +0300 Subject: [PATCH 0269/2314] Added missing glob-style patterns Specifically the character set's complement and range. A separate pattern-matching.md topic that's linked from `KEYS` and `[HSZ]?SCAN` doc pages seems to be a good idea <- thoughts? 
--- commands/keys.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/keys.md b/commands/keys.md index 9263ea2724..670bce96ac 100644 --- a/commands/keys.md +++ b/commands/keys.md @@ -21,6 +21,8 @@ Supported glob-style patterns: * `h?llo` matches `hello`, `hallo` and `hxllo` * `h*llo` matches `hllo` and `heeeello` * `h[ae]llo` matches `hello` and `hallo,` but not `hillo` +* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` +* `h[a-b]llo` matches `hallo` and `hbllo` Use `\` to escape special characters if you want to match them verbatim. From f440bc7128dff93442ea5a0d2476f0a5e78b9934 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 5 May 2015 11:55:24 +0200 Subject: [PATCH 0270/2314] Fix typo in cluster tutorial --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 782ed0afc5..ffe8e646e1 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -67,7 +67,7 @@ little bandwidth and processing time. Redis Cluster data sharding --- -Redis Cluster does not use consistency hashing, but a different form of sharding +Redis Cluster does not use consistent hashing, but a different form of sharding where every key is conceptually part of what we call an **hash slot**. 
There are 16384 hash slots in Redis Cluster, and to compute what is the hash From f9c1d2cf4606e183031846e542f4855f69c59882 Mon Sep 17 00:00:00 2001 From: Romuald Brunet Date: Mon, 11 May 2015 16:57:46 +0200 Subject: [PATCH 0271/2314] Fix typo in cmsgpack eval example --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index a25096b12d..97f506f938 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -581,7 +581,7 @@ Example: ``` 127.0.0.1:6379> eval 'return cmsgpack.pack({"foo", "bar", "baz"})' 0 "\x93\xa3foo\xa3bar\xa3baz" -127.0.0.1:6379> eval 'return cmsgpack.unpack(ARGV[1])' 0 "\x93\xa3foo\xa3bar\xa3baz +127.0.0.1:6379> eval 'return cmsgpack.unpack(ARGV[1])' 0 "\x93\xa3foo\xa3bar\xa3baz" 1) "foo" 2) "bar" 3) "baz" From 5832961a26df5a1f6a1d9d4c4ac587a4736ba0cd Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:15:44 -0400 Subject: [PATCH 0272/2314] Remove unnecessary "if" from sentence --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index ffe8e646e1..511ee0bb97 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -114,7 +114,7 @@ In our example cluster with nodes A, B, C, if node B fails the cluster is not able to continue, since we no longer have a way to serve hash slots in the range 5501-11000. -However if when the cluster is created (or at a latter time) we add a slave +However when the cluster is created (or at a latter time) we add a slave node to every master, so that the final cluster is composed of A, B, C that are masters nodes, and A1, B1, C1 that are slaves nodes, the system is able to continue if node B fails. 
From 83d9e64493e4989b127aae401a80c48ffcd0f080 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:16:52 -0400 Subject: [PATCH 0273/2314] s/performances/performance/ in cluster-tutorial.md --- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 511ee0bb97..fa95065863 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -153,10 +153,10 @@ are already able to reason about because of past experiences with traditional database systems not involving distributed systems. Similarly you can improve consistency by forcing the database to flush data on disk before replying to the client, but this usually results into prohibitively low -performances. That would be the equivalent of synchronous replication in +performance. That would be the equivalent of synchronous replication in the case of Redis Cluster. -Basically there is a trade-off to take between performances and consistency. +Basically there is a trade-off to take between performance and consistency. Redis Cluster has support for synchronous writes when absolutely needed, implemented via the `WAIT` command, this makes losing writes a lot less From 0cbd15202d35876f7a95766e363ac39df50e2354 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:17:24 -0400 Subject: [PATCH 0274/2314] s/try/see/ in cluster-tutorial.md --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index fa95065863..a42be73bc9 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -500,7 +500,7 @@ ruby ./example.rb ``` This is not a very interesting program and we'll use a better one in a moment -but we can already try what happens during a resharding when the program +but we can already see what happens during a resharding when the program is running. 
Resharding the cluster From 03fd32000497030bd46932ff9c08bc90da78c66e Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:18:36 -0400 Subject: [PATCH 0275/2314] Fix "loose" and "say" tenses in cluster-tutorial.md --- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index a42be73bc9..2d26623378 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -691,8 +691,8 @@ Now we can look at the output of the consistency test to see what it reported. As you can see during the failover the system was not able to accept 578 reads and 577 writes, however no inconsistency was created in the database. This may sound unexpected as in the first part of this tutorial we stated that Redis -Cluster can lost writes during the failover because it uses asynchronous -replication. What we did not said is that this is not very likely to happen +Cluster can loose writes during the failover because it uses asynchronous +replication. What we did not say is that this is not very likely to happen because Redis sends the reply to the client, and the commands to replicate to the slaves, about at the same time, so there is a very small window to lose data. However the fact that it is hard to trigger does not mean that it From 469ef999bf48390aafa3aac8da934b1b1963f453 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:19:07 -0400 Subject: [PATCH 0276/2314] Fix redis-trib add-node command name in cluster-tutorial.md --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 2d26623378..b1e56e81f1 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -792,7 +792,7 @@ the existing cluster. 
./redis-trib.rb add-node 127.0.0.1:7006 127.0.0.1:7000 -As you can see I used the **addnode** command specifying the address of the +As you can see I used the **add-node** command specifying the address of the new node as first argument, and the address of a random existing node in the cluster as second argument. From bbf7da888b37fbec2aea9bbf433fe9cf1d78ddca Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:19:21 -0400 Subject: [PATCH 0277/2314] Fix markdown link syntax in cluster-tutorial.md --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index b1e56e81f1..7f92285094 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -903,7 +903,7 @@ master to another one automatically, without the help of the system administrato The automatic reconfiguration of replicas is called *replicas migration* and is able to improve the reliability of a Redis Cluster. -Note: you can read the details of replicas migration in the (Redis Cluster Specification)[/topics/cluster-spec], here we'll only provide some information about the +Note: you can read the details of replicas migration in the [Redis Cluster Specification](/topics/cluster-spec), here we'll only provide some information about the general idea and what you should do in order to benefit from it. 
The reason why you may want to let your cluster replicas to move from one master From 33611503187749e4187374c6fef81f8295637b61 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Mon, 11 May 2015 11:19:39 -0400 Subject: [PATCH 0278/2314] Remove extra "w" from end of sentence in cluster-turorial.md --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 7f92285094..c55e439aae 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -993,7 +993,7 @@ in order to migrate your data set to Redis Cluster: 6. Restart your Redis Cluster nodes with the new AOF files. They'll complain that there are keys that should not be there according to their configuration. 7. Use `redis-trib fix` command in order to fix the cluster so that keys will be migrated according to the hash slots each node is authoritative or not. 8. Use `redis-trib check` at the end to make sure your cluster is ok. -9. Restart your clients modified to use a Redis Cluster aware client library.w +9. Restart your clients modified to use a Redis Cluster aware client library. There is an alternative way to import data from external instances to a Redis Cluster, which is to use the `redis-trib import` command. From 040373644f6cb7c8ee6dd08ee32d9e3eca419e84 Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Tue, 12 May 2015 22:51:41 -0400 Subject: [PATCH 0279/2314] Fix typo in cluster-tutorial.md (s/loose/lose/) --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index c55e439aae..67f159692c 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -691,7 +691,7 @@ Now we can look at the output of the consistency test to see what it reported. 
As you can see during the failover the system was not able to accept 578 reads and 577 writes, however no inconsistency was created in the database. This may sound unexpected as in the first part of this tutorial we stated that Redis -Cluster can loose writes during the failover because it uses asynchronous +Cluster can lose writes during the failover because it uses asynchronous replication. What we did not say is that this is not very likely to happen because Redis sends the reply to the client, and the commands to replicate to the slaves, about at the same time, so there is a very small window to From 3fbe93be7496b93b57fa8bf70f72975d88199244 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Mon, 23 Mar 2015 10:09:11 -0300 Subject: [PATCH 0280/2314] Have the build fail when spelling mistakes are found. --- Rakefile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Rakefile b/Rakefile index b6f2dc7655..f2877f68a4 100644 --- a/Rakefile +++ b/Rakefile @@ -29,6 +29,8 @@ task :spellcheck do io.puts(File.read("wordlist")) end + errors = false + Dir["**/*.md"].each do |file| command = %q{ ruby -pe 'gsub /^ .*$/, ""' | @@ -41,8 +43,13 @@ task :spellcheck do line[/^& ([^ ]+)/, 1] end.compact - puts "#{file}: #{words.uniq.sort.join(" ")}" if words.any? + if words.size > 0 + errors = true + puts("#{file}: #{words.uniq.sort.join(" ")}") + end end + + abort("Spelling errors found.") if errors end namespace :format do From 9d897863abf25f675d1a84afea0e842165451c5b Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Mon, 23 Mar 2015 10:01:34 -0300 Subject: [PATCH 0281/2314] Ignore code blocks for spell checks. 
--- Rakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Rakefile b/Rakefile index f2877f68a4..8f4bd30f6b 100644 --- a/Rakefile +++ b/Rakefile @@ -35,7 +35,7 @@ task :spellcheck do command = %q{ ruby -pe 'gsub /^ .*$/, ""' | ruby -pe 'gsub /`[^`]+`/, ""' | - ruby -e 'puts $stdin.read.gsub /\[([^\]]+)\]\(([^\)]+)\)/m, "\\1"' | + ruby -e 'puts $stdin.read.gsub(/\[([^\]]+)\]\(([^\)]+)\)/m, "\\1").gsub(/^```.*```/m, "")' | aspell -H -a --extra-dicts=./tmp/dict 2>/dev/null } From c013837cdaf8eefbaa9e7ca67e7cc57e7d3d36d0 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 20 May 2015 10:47:12 +0200 Subject: [PATCH 0282/2314] Sentinel documentation big rewrite --- topics/sentinel.md | 1087 +++++++++++++++++++++++++++++++------------- 1 file changed, 777 insertions(+), 310 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 12457a215f..dd0744ec48 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -1,31 +1,39 @@ Redis Sentinel Documentation === -Redis Sentinel is a system designed to help managing Redis instances. -It performs the following four tasks: +Redis Sentinel provides high availability for Redis. In practical terms this +means that using Senitnel you can create a Redis deployment that resists +without human intervention to certian kind of failures. + +Redis Senitnel also provides other collateral tasks such as monitoring, +notifications and acts as a configuration provider for clients. + +This is the full list of Sentinel capabilities at a macroscopical level (i.e. the *big picture*): * **Monitoring**. Sentinel constantly checks if your master and slave instances are working as expected. -* **Notification**. Sentinel can notify the system administrator, or another computer program, via an API, that something is wrong with one of the monitored Redis instances. +* **Notification**. 
Sentinel can notify the system administrator, or other computer programs, via an API, that something is wrong with one of the monitored Redis instances.
This makes Sentinel more complex but -also better compared to a system using a single process, for example: +The sum of Sentinels, Redis instances (masters and slaves) and clients +connecting to Sentinel and Redis, are also a larger distributed system with +specific properties. In this document concepts will be introduced gradually +starting from basic informations needed in order to understand the basic +properties of Sentinel, to more complex informations (that are optional) in +order to understand how exactly Sentinel works. -* A cluster of Sentinels can failover a master even if some Sentinels are failing. -* A single Sentinel not working well, or not well connected, can't failover a master without authorization from other Sentinels. -* Clients can connect to any random Sentinel to fetch the configuration of a master. +Quick Start +=== Obtaining Sentinel --- @@ -34,16 +42,14 @@ The current version of Sentinel is called **Sentinel 2**. It is a rewrite of the initial Sentinel implementation using stronger and simpler to predict algorithms (that are explained in this documentation). -A stable release of Redis Sentinel is shipped with Redis 2.8, which is the -latest stable release of Redis. +A stable release of Redis Sentinel is shipped with Redis 2.8 and 3.0, which are +the two latest stable releases of Redis. -New developments are performed in the *unstable* branch, and new features are -backported into the 2.8 branch as soon as they are considered to be stable. +New developments are performed in the *unstable* branch, and new features +sometimes are back ported into the 2.8 and 3.0 branch as soon as they are +considered to be stable. -IMPORTANT: **Even if you are using Redis 2.6, you should use Sentinel shipped with Redis 2.8**. Redis Sentinel shipped with Redis 2.6, that is, "Sentinel 1", -is deprecated and has many bugs. In general you should migrate all your -Redis and Sentinel instances to Redis 2.8 ASAP to get a better overall -experience. 
+Redis Sentinel version 1, shipped with Redis 2.6, is deprecated and should not be used. Running Sentinel --- @@ -69,6 +75,16 @@ connections from the IP addresses of the other Sentinel instances. Otherwise Sentinels can't talk and can't agree about what to do, so failover will never be performed. +Fundamental things to know about Sentinel before to deploy +--- + +1. You need at least three Sentinel instances for a robust deployment. +2. The three Sentinel instances should be placed into computers or virtual machines that are believed to fail in an independent way. So for example different physical servers or Virtual Machines executed on different availability zones. +3. Sentinel + Redis distributed system does not guarantee that acknowledged writes are retained during failures, since Redis uses asynchronous replication. However there are ways to deploy Sentinel that make the window to lose writes limited to certain moments, while there are other less secure ways to deploy it. +4. You need Sentinel support in your clients. Popular client libraries have Sentinel support, but not all. +5. There is no HA setup which is safe if you don't test from time to time in development environments, or even better if you can, in production environments, if they work. You may have a misconfiguration that will become apparent only when it's too late (at 3am when your master stops working). +6. **Sentinel, Docker, or other forms of Network Address Translation or Port Mapping should be mixed with care**: Docker performs port remapping, breaking Sentinel auto discovery of other Sentinel processes and the list of slaves for a master. Check the section about Sentinel and Docker later in this document for more information. + Configuring Sentinel --- @@ -92,26 +108,37 @@ master (that may have any number of slaves) a different name. There is no need to specify slaves, which are auto-discovered. 
Sentinel will update the configuration automatically with additional information about slaves (in order to retain the information in case of restart). The configuration is
-also rewritten every time a slave is promoted to master during a failover.
+also rewritten every time a slave is promoted to master during a failover
+and every time a new Sentinel is discovered.
 
 The example configuration above, basically monitor two sets of Redis
 instances, each composed of a master and an undefined number of slaves.
 One set of instances is called `mymaster`, and the other `resque`.
 
+The meaning of the arguments of `sentinel monitor` statements is the following:
+
+    sentinel monitor <master-group-name> <ip> <port> <quorum>
+
 For the sake of clarity, let's check line by line what the configuration
 options mean:
 
 The first line is used to tell Redis to monitor a master called *mymaster*,
-that is at address 127.0.0.1 and port 6379, with a level of agreement needed
-to detect this master as failing of 2 sentinels (if the agreement is not reached
-the automatic failover does not start).
+that is at address 127.0.0.1 and port 6379, with a quorum of 2. Everything
+is pretty obvious but the **quorum** argument:
+
+* The **quorum** is the number of Sentinels that need to agree about the fact the master is not reachable, in order to really mark the master as failing, and eventually start a fail over procedure if possible.
+* However **the quorum is only used to detect the failure**. In order to actually perform a failover, one of the Sentinels need to be elected leader for the failover and be authorized to proceed. This only happens with the vote of the **majority of the Sentinel processes**.
+
+So for example if you have 5 Sentinel processes, and the quorum for a given
+master set to the value of 2, this is what happens:
-However note that whatever the agreement you specify to detect an instance as not working, a Sentinel requires **the vote from the majority** of the known Sentinels in the system in order to start a failover and obtain a new *configuration Epoch* to assign to the new configuration after the failover.
+* If two Sentinels agree at the same time about the master being unreachable, one of the two will try to start a failover.
+* If there are at least a total of three Sentinels reachable, the failover will be authorized and will actually start.
-In the example the quorum is set to to 2, so it takes 2 sentinels that agree that
-a given master is not reachable or in an error condition for a failover to
-be triggered (however as you'll see in the next section to trigger a failover is
-not enough to start a successful failover, authorization is required).
+In practical terms this means during failures **Sentinel never starts a failover if the majority of Sentinel processes are unable to talk** (aka no failover in the minority partition).
+
+Other Sentinel options
+---
 
 The other options are almost always in the form:
 
@@ -121,21 +148,15 @@
 And are used for the following purposes:
 
 * `down-after-milliseconds` is the time in milliseconds an instance should
 not be reachable (either does not reply to our PINGs or it is replying with an
-error) for a Sentinel starting to think it is down. After this time has elapsed
-the Sentinel will mark an instance as **subjectively down** (also known as
-`SDOWN`), that is not enough to start the automatic failover.
-However if enough instances will think that there is a subjectively down
-condition, then the instance is marked as **objectively down**. The number of
-sentinels that needs to agree depends on the configured agreement for this
-master.
+error) for a Sentinel starting to think it is down.
* `parallel-syncs` sets the number of slaves that can be reconfigured to use the new master after a failover at the same time. The lower the number, the more time it will take for the failover process to complete, however if the slaves are configured to serve old data, you may not want all the slaves to -resync at the same time with the new master, as while the replication process -is mostly non blocking for a slave, there is a moment when it stops to load -the bulk data from the master during a resync. You may make sure only one -slave at a time is not reachable by setting this option to the value of 1. +re-synchronize with the master at the same time. While the replication +process is mostly non blocking for a slave, there is a moment when it stops to +load the bulk data from the master. You may want to make sure only one slave +at a time is not reachable by setting this option to the value of 1. Additional options are described in the rest of this document and documented in the example `sentinel.conf` file shipped with the Redis @@ -143,343 +164,437 @@ distribution. All the configuration parameters can be modified at runtime using the `SENTINEL SET` command. See the **Reconfiguring Sentinel at runtime** section for more information. -Quorum +Example Sentinel deployments --- -The previous section showed that every master monitored by Sentinel is associated to -a configured **quorum**. It specifies the number of Sentinel processes -that need to agree about the unreachability or error condition of the master in -order to trigger a failover. +Now that you know the basic information about Sentinel, you may wonder where +you should place your Sentinel processes, how much Sentinel processes you need +and so forth. This section shows a few example deployments. -However, after the failover is triggered, in order for the failover to actually be -performed, **at least a majority of Sentinels must authorize the Sentinel to -failover**. 
+We use ASCII art in order to show you configuration examples in a *graphical*
+format, this is what the different symbols mean:

-Let's try to make things a bit more clear:

+       +--------------------+
+       | This is a computer |
+       | or VM that fails   |
+       | independently. We  |
+       | call it a "box"    |
+       +--------------------+

-* Quorum: the number of Sentinel processes that need to detect an error condition in order for a master to be flagged as **ODOWN**.
-* The failover is triggered by the **ODOWN** state.
-* Once the failover is triggered, the Sentinel trying to failover is required to ask for authorization to a majority of Sentinels (or more than the majority if the quorum is set to a number greater than the majority).

+We write inside the boxes what they are running:

-The difference may seem subtle but is actually quite simple to understand and use.
-For example if you have 5 Sentinel instances, and the quorum is set to 2, a failover
-will be triggered as soon as 2 Sentinels believe that the master is not reachable,
-however one of the two Sentinels will be able to failover only if it gets authorization
-at least from 3 Sentinels.

+       +-------------------+
+       | Redis master M1   |
+       | Redis Sentinel S1 |
+       +-------------------+

-If instead the quorum is configured to 5, all the Sentinels must agree about the master
-error condition, and the authorization from all Sentinels is required in order to
-failover.

+Different boxes are connected by lines, to show that they are able to talk:

-Configuration epochs
----

+       +-------------+               +-------------+
+       | Sentinel S1 |---------------| Sentinel S2 |
+       +-------------+               +-------------+

-Sentinels require to get authorizations from a majority in order to start a
-failover for a few important reasons:

+Network partitions are shown as interrupted lines using slashes:

-When a Sentinel is authorized, it gets a unique **configuration epoch** for the master it is failing over.
This is a number that will be used to version the new configuration after the failover is completed. Because a majority agreed that a given version was assigned to a given Sentinel, no other Sentinel will be able to use it. This means that every configuration of every failover is versioned with a unique version. We'll see why this is so important.

+       +-------------+                +-------------+
+       | Sentinel S1 |------ // ------| Sentinel S2 |
+       +-------------+                +-------------+

-Moreover Sentinels have a rule: if a Sentinel voted another Sentinel for the failover of a given master, it will wait some time to try to failover the same master again. This delay is the `failover-timeout` you can configure in `sentinel.conf`. This means that Sentinels will not try to failover the same master at the same time, the first to ask to be authorized will try, if it fails another will try after some time, and so forth.
+Also note that:

-Redis Sentinel guarantees the *liveness* property that if a majority of Sentinels are able to talk, eventually one will be authorized to failover if the master is down.
+* Masters are called M1, M2, M3, ..., Mn.
+* Slaves are called R1, R2, R3, ..., Rn (R stands for *replica*).
+* Sentinels are called S1, S2, S3, ..., Sn.
+* Clients are called C1, C2, C3, ..., Cn.
+* When an instance changes role because of Sentinel actions, we put it inside square brackets, so [M1] means an instance that is now a master because of Sentinel intervention.

-Redis Sentinel also guarantees the *safety* property that every Sentinel will failover the same master using a different *configuration epoch*.
+Note that we will never show **setups where just two Sentinels are used**, since
+Sentinels always need **to talk with the majority** in order to start a
+failover.
-Configuration propagation +Example 1: just two Sentinels, DON'T DO THIS --- -Once a Sentinel is able to failover a master successfully, it will start to broadcast -the new configuration so that the other Sentinels will update their information -about a given master. + +----+ +----+ + | M1 |---------| R1 | + | S1 | | S2 | + +----+ +----+ -For a failover to be considered successful, it requires that the Sentinel was able -to send the `SLAVEOF NO ONE` command to the selected slave, and that the switch to -master was later observed in the `INFO` output of the master. + Configuration: quorum = 1 -At this point, even if the reconfiguration of the slaves is in progress, the failover -is considered to be successful, and all the Sentinels are required to start reporting -the new configuration. +* In this setup, if the master M1 fails, R1 will be promoted since the two Sentinels can reach agreement about the failure (obviously with quorum set to 1) and can also authorize a failover because the majority is two. So apparently it could superficially work, however check the next points to see why this setup is broken. +* If the box where M1 is running stops working, also S1 stops working. The Sentinel running in the other box S2 will not be able to authorize a failover, so the system will become not available. -The way a new configuration is propagated is the reason why we need that every -Sentinel failover is authorized with a different version number (configuration epoch). +Note that a majority is needed in order to order different failovers, and later propagate the latest configuration to all the Sentinels. Also note that the ability to failover in a single side of the above setup, without any agreement, would be very dangerous: -Every Sentinel continuously broadcast its version of the configuration of a master -using Redis Pub/Sub messages, both in the master and all the slaves. 
-At the same time all the Sentinels wait for messages to see what is the configuration
-advertised by the other Sentinels.

+       +----+           +------+
+       | M1 |----//-----| [M1] |
+       | S1 |           | S2   |
+       +----+           +------+

-Configurations are broadcast in the `__sentinel__:hello` Pub/Sub channel.
+In the above configuration we created two masters (assuming S2 could failover
+without authorization) in a perfectly symmetrical way. Clients may write
+indefinitely to both sides, and there is no way to understand when the
+partition heals what configuration is the right one, in order to prevent
+a *permanent split brain condition*.

-Because every configuration has a different version number, the greater version
-always wins over smaller versions.
-
-So for example the configuration for the master `mymaster` start with all the
-Sentinels believing the master is at 192.168.1.50:6379. This configuration
-has version 1. After some time a Sentinel is authorized to failover with version 2.
-If the failover is successful, it will start to broadcast a new configuration, let's
-say 192.168.1.50:9000, with version 2. All the other instances will see this configuration
-and will update their configuration accordingly, since the new configuration has
-a greater version.
-
-This means that Sentinel guarantees a second liveness property: a set of
-Sentinels that are able to communicate will all converge to the same configuration
-with the higher version number.
-
-Basically if the net is partitioned, every partition will converge to the higher
-local configuration. In the special case of no partitions, there is a single
-partition and every Sentinel will agree about the configuration.
+So please **deploy at least three Sentinels in three different boxes** always.
-More details about SDOWN and ODOWN +Example 2: basic setup with three boxes --- -As already briefly mentioned in this document Redis Sentinel has two different -concepts of *being down*, one is called a *Subjectively Down* condition -(SDOWN) and is a down condition that is local to a given Sentinel instance. -Another is called *Objectively Down* condition (ODOWN) and is reached when -enough Sentinels (at least the number configured as the `quorum` parameter -of the monitored master) have an SDOWN condition, and get feedback from -other Sentinels using the `SENTINEL is-master-down-by-addr` command. - -From the point of view of a Sentinel an SDOWN condition is reached if we -don't receive a valid reply to PING requests for the number of seconds -specified in the configuration as `is-master-down-after-milliseconds` -parameter. - -An acceptable reply to PING is one of the following: +This is a very simple setup, that has the advantage to be simple to tune +for additional safety. It is based on three boxes, each box running both +a Redis process and a Sentinel process. + + + +----+ + | M1 | + | S1 | + +----+ + | + +----+ | +----+ + | R2 |----+----| R3 | + | S2 | | S3 | + +----+ +----+ + + Configuration: quorum = 2 + +If the master M1 fails, S2 and S3 will agree about the failure and will +be able to authorize a failover, making clients able to continue. + +In every Sentinel setup, being Redis asynchronously replicated, there is +always the risk of losing some write because a given acknowledged write +may not be able to reach the slave which is promoted to master. However in +the above setup there is an higher risk due to clients partitioned away +with an old master, like in the following picture: + + +----+ + | M1 | + | S1 | <- C1 (writes will be lost) + +----+ + | + / + / + +------+ | +----+ + | [M2] |----+----| R3 | + | S2 | | S3 | + +------+ +----+ + +In this case a network partition isolated the old master M1, so the +slave R2 is promoted to master. 
However clients, like C1, that are
+in the same partition as the old master, may continue to write data
+to the old master. This data will be lost forever since when the partition
+will heal, the master will be reconfigured as a slave of the new master,
+discarding its data set.
+
+This problem can be mitigated using the following Redis replication
+feature, that allows to stop accepting writes if a master detects that
+is no longer able to transfer its writes to the specified number of slaves.

-* PING replied with +PONG.
-* PING replied with -LOADING error.
-* PING replied with -MASTERDOWN error.
-
-Any other reply (or no reply) is considered non valid.
+    min-slaves-to-write 1
+    min-slaves-max-lag 10

-Note that SDOWN requires that no acceptable reply is received for the whole
-interval configured, so for instance if the interval is 30000 milliseconds
-(30 seconds) and we receive an acceptable ping reply every 29 seconds, the
-instance is considered to be working.
+With the above configuration (please see the self-commented `redis.conf` example in the Redis distribution for more information) a Redis instance, when acting as a master, will stop accepting writes if it can't write to at least 1 slave. Since replication is asynchronous *not being able to write* actually means that the slave is either disconnected, or is not sending us asynchronous acknowledges for more than the specified `max-lag` number of seconds.

-To switch from SDOWN to ODOWN no strong consensus algorithm is used, but
-just a form of gossip: if a given Sentinel gets reports that the master
-is not working from enough Sentinels in a given time range, the SDOWN is
-promoted to ODOWN. If this acknowledge is later missing, the flag is cleared.
+Using this configuration the old Redis master M1 in the above example, will become unavailable after 10 seconds.
When the partition heals, the Sentinel configuration will converge to the new one, the client C1 will be able to fetch a valid configuration and will continue with the new master. -As already explained, a more strict authorization is required in order -to really start the failover, but no failover can be triggered without -reaching the ODOWN state. +However there is no free lunch. With this refinement, if the two slaves are +down, the master will stop accepting writes. It's a trade off. -The ODOWN condition **only applies to masters**. For other kind of instances -Sentinel don't require any agreement, so the ODOWN state is never reached -for slaves and other sentinels. +Example 3: Sentinel in the client boxes +--- -Sentinels and Slaves auto discovery +Sometimes we have only two Redis boxes available, one for the master and +one for the slave. The configuration in the example 2 is not viable in +that case, so we can resort to the following, where Sentinels are placed +where clients are: + + +----+ +----+ + | M1 |----+----| R1 | + | S1 | | | S2 | + +----+ | +----+ + | + +------------+------------+ + | | | + | | | + +----+ +----+ +----+ + | C1 | | C2 | | C3 | + | S1 | | S2 | | S3 | + +----+ +----+ +----+ + + Configuration: quorum = 2 + +In this setup, the point of view Sentinels is the same as the clients: if +a master is reachable by the majority of the clients, it is fine. +C1, C2, C3 here are generic clients, it does not mean that C1 identifies +a single client connected to Redis. It is more likely something like +an application server, a Rails app, or something like that. + +If the box where M1 and S1 are running fails, the failover will happen +without issues, however it is easy to see that different network partitions +will result in different behaviors. For example Sentinel will not be able +to setup if the network between the clients and the Redis servers will +get disconnected, since the Redis master and slave will be both not +available. 
+ +Note that if C3 gets partitioned with M1 (hardly possible with +the network described above, but more likely possible with different +layouts, or because of failures at the software layer), we have a similar +issue as described in Example 2, with the difference that here we have +no way to break the symmetry, since there is just a slave and master, so +the master can't stop accepting queries when is disconnected by its master, +otherwise the master would never be available during slave failures. + +So this is a valid setup but the setup in the Example 2 has advantages +such as the HA system of Redis running in the same boxes as Redis itself +which may be simpler to manage, and the ability to put a bound on the amount +of time a master into the minority partition can receive writes. + +Example 4: Sentinel client side with less than three clients --- -While Sentinels stay connected with other Sentinels in order to reciprocally -check the availability of each other, and to exchange messages, you don't -need to configure the other Sentinel addresses in every Sentinel instance you -run, as Sentinel uses the Redis master Pub/Sub capabilities in order to -discover the other Sentinels that are monitoring the same master. +The setup described in the Example 3 cannot be used if there are not enough +three boxes in the client side (for example three web servers). In this +case we need to resort to a mixed setup like the following: + + +----+ +----+ + | M1 |----+----| R1 | + | S1 | | | S2 | + +----+ | +----+ + | + +------+-----+ + | | + | | + +----+ +----+ + | C1 | | C2 | + | S3 | | S4 | + +----+ +----+ + + Configuration: quorum = 3 + +This is similar to the setup in Example 3, but here we run four Sentinels +in the four boxes we have available. If the master M1 becomes not available +the other three Sentinels will perform the failover. + +In theory this setup works removing the box where C2 and S4 are running, and +setting the quorum to 2. 
However it is unlikely that we want HA in the +Redis side without having high availability in our application layer. + +Sentinel, Docker, NAT, and possible issues +--- -This is obtained by sending *Hello Messages* into the channel named -`__sentinel__:hello`. +Docker uses a technique called port mapping: programs running inside Docker +containers may be exposed with a different port compared to the one the +program believes to be using. This is useful in order to run multiple +containers using the same ports, at the same time, in the same server. -Similarly you don't need to configure what is the list of the slaves attached -to a master, as Sentinel will auto discover this list querying Redis. +Docker is not the only software system where this happens, there are other +Network Address Translation setups where ports may be remapped, and sometimes +not ports but also IP addresses. -* Every Sentinel publishes a message to every monitored master and slave Pub/Sub channel `__sentinel__:hello`, every two seconds, announcing its presence with ip, port, runid. -* Every Sentinel is subscribed to the Pub/Sub channel `__sentinel__:hello` of every master and slave, looking for unknown sentinels. When new sentinels are detected, they are added as sentinels of this master. -* Hello messages also include the full current configuration of the master. If another Sentinel has a configuration for a given master that is older than the one received, it updates to the new configuration immediately. -* Before adding a new sentinel to a master a Sentinel always checks if there is already a sentinel with the same runid or the same address (ip and port pair). In that case all the matching sentinels are removed, and the new added. +Remapping ports and addresses creates issues with Sentinel in two ways: -Consistency under partitions ---- +1. 
Sentinel auto-discovery of other Sentinels no longer works, since it is based on *hello* messages where each Sentinel announce at which port and IP address they are listening for connection. However Sentinels have no way to understand that an address or port is remapped, so it is announcing an information that is not correct for other Sentinels to connect. +2. Slaves are listed in the `INFO` output of a Redis master in a similar way: the address is detected by the master checking the remote peer of the TCP connection, while the port is advertised by the slave itself during the handshake, however the port may be wrong for the same reason as exposed in point 1. -Redis Sentinel configurations are eventually consistent, so every partition will -converge to the higher configuration available. -However in a real-world system using Sentinel there are three different players: +Since Sentinels auto detect slaves using masters `INFO` output information, +the detected slaves will not be reachable, and Sentinel will never be able to +failover the master, since there are no good slaves from the point of view of +the system, so there is currently no way to monitor with Sentinel a set of +master and slave instances deployed with Docker, unless you instruct Docker +to map the port 1:1. -* Redis instances. -* Sentinel instances. -* Clients. +For the first problem instead, in case you want to run a set of Sentinel +instances using Docker, you can use the following two Sentinel configuration +directives in order to force Sentinel to announce a specific set of IP +and port: -In order to define the behavior of the system we have to consider all three. 
+ sentinel announce-ip + sentinel announce-port -The following is a simple network where there are 3 nodes, each running -a Redis instance, and a Sentinel instance: +A quick tutorial +=== - +-------------+ - | Sentinel 1 | <--- Client A - | Redis 1 (M) | - +-------------+ - | - | - +-------------+ | +------------+ - | Sentinel 2 |-----+-- / partition / ----| Sentinel 3 | <--- Client B - | Redis 2 (S) | | Redis 3 (M)| - +-------------+ +------------+ +In the next sections of this document, all the details about Sentinel API, +configuration and semantics will be covered incrementally. However for people +that want to play with the system ASAP, this section is a tutorial that shows +how to configure and interact with 3 Sentinel instances. -In this system the original state was that Redis 3 was the master, while -Redis 1 and 2 were slaves. A partition occurred isolating the old master. -Sentinels 1 and 2 started a failover promoting Sentinel 1 as the new master. +Here we assume that the instances are executed at port 5000, 5001, 5002. +We also assume that you have a running Redis master at port 6379 with a +slave running at port 6380. We will use the IPv4 loopback address 127.0.0.1 +everywhere during the tutorial, assuming you are running the simulation +in your personal computer. -The Sentinel properties guarantee that Sentinel 1 and 2 now have the new -configuration for the master. However Sentinel 3 has still the old configuration -since it lives in a different partition. +The three Sentinel configuration files should look like the following: -We know that Sentinel 3 will get its configuration updated when the network -partition will heal, however what happens during the partition if there -are clients partitioned with the old master? 
+ port 5000 + sentinel monitor mymaster 127.0.0.1 6379 2 + sentinel down-after-milliseconds mymaster 5000 + sentinel failover-timeout mymaster 60000 + sentinel parallel-syncs mymaster 1 -Clients will be still able to write to Redis 3, the old master. When the -partition will rejoin, Redis 3 will be turned into a slave of Redis 1, and -all the data written during the partition will be lost. +The other two configuration files will be identical but using 5001 and 5002 +as port numbers. -Depending on your configuration you may want or not that this scenario happens: +A few things to note about the above configuration: -* If you are using Redis as a cache, it could be handy that Client B is still able to write to the old master, even if its data will be lost. -* If you are using Redis as a store, this is not good and you need to configure the system in order to partially prevent this problem. +* The master set is called `mymaster`. It identifies the master and its slaves. Since each *master set* has a different name, Sentinel can monitor different sets of masters and slaves at the same time. +* The quorum was set to the value of 2 (last argument of `sentinel monitor` configuration directive). +* The `down-after-milliseconds` value is 5000 milliseconds, that is 5 seconds, so masters will be detected as failing as soon as we don't receive any reply from our pings within this amount of time. 
-Since Redis is asynchronously replicated, there is no way to totally prevent data loss in this scenario, however you can bound the divergence between Redis 3 and Redis 1 -using the following Redis configuration option: +Once you start the three Sentinels, you'll see a few messages they log, like: - min-slaves-to-write 1 - min-slaves-max-lag 10 + +monitor master mymaster 127.0.0.1 6379 quorum 2 -With the above configuration (please see the self-commented `redis.conf` example in the Redis distribution for more information) a Redis instance, when acting as a master, will stop accepting writes if it can't write to at least 1 slave. Since replication is asynchronous *not being able to write* actually means that the slave is either disconnected, or is not sending us asynchronous acknowledges for more than the specified `max-lag` number of seconds. +This is a Sentinel event, and you can receive this kind of events via Pub/Sub +if you `SUBSCRIBE` to the event name as specified later. -Using this configuration the Redis 3 in the above example will become unavailable after 10 seconds. When the partition heals, the Sentinel 3 configuration will converge to -the new one, and Client B will be able to fetch a valid configuration and continue. +Sentinel generates and logs different events during failure detection and +failover. -Sentinel persistent state +Asking Sentinel about the state of a master --- -Sentinel state is persisted in the sentinel configuration file. For example -every time a new configuration is received, or created (leader Sentinels), for -a master, the configuration is persisted on disk together with the configuration -epoch. This means that it is safe to stop and restart Sentinel processes. - -Sentinel reconfiguration of instances outside the failover procedure. 
+The most obvious thing to do with Sentinel to get started, is check if the +master it is monitoring is doing well: + + $ redis-cli -p 5000 + 127.0.0.1:5000> sentinel master mymaster + 1) "name" + 2) "mymaster" + 3) "ip" + 4) "127.0.0.1" + 5) "port" + 6) "6379" + 7) "runid" + 8) "953ae6a589449c13ddefaee3538d356d287f509b" + 9) "flags" + 10) "master" + 11) "link-pending-commands" + 12) "0" + 13) "link-refcount" + 14) "1" + 15) "last-ping-sent" + 16) "0" + 17) "last-ok-ping-reply" + 18) "735" + 19) "last-ping-reply" + 20) "735" + 21) "down-after-milliseconds" + 22) "5000" + 23) "info-refresh" + 24) "126" + 25) "role-reported" + 26) "master" + 27) "role-reported-time" + 28) "532439" + 29) "config-epoch" + 30) "1" + 31) "num-slaves" + 32) "1" + 33) "num-other-sentinels" + 34) "2" + 35) "quorum" + 36) "2" + 37) "failover-timeout" + 38) "60000" + 39) "parallel-syncs" + 40) "1" + +As you can see, it prints a number of information about the master. There are +a few that are of particular interest for us: + +1. `num-other-sentinels` is 2, so we know the Sentinel already detected two more Sentinels for this master. If you check the logs you'll see the `+sentinel` events generated. +2. `flags` is just `master`. If the master was down we could expect to see `s_down` or `o_down` flag as well here. +3. `num-slaves` is correctly set to 1, so Sentinel also detected that there is an attached slave to our master. + +In order to explore more about this instance, you may want to try the following +two commands: + + SENTINEL slaves mymaster + SENTINEL sentinels mymaster + +The first will provide similar informations about the slaves connected to the +master, and the second about the other Sentinels. + +Obtaining the address of the current master --- -Even when no failover is in progress, Sentinels will always try to set the -current configuration on monitored instances. 
Specifically: - -* Slaves (according to the current configuration) that claim to be masters, will be configured as slaves to replicate with the current master. -* Slaves connected to a wrong master, will be reconfigured to replicate with the right master. - -For Sentinels to reconfigure slaves, the wrong configuration must be observed for some time, that is greater than the period used to broadcast new configurations. - -This prevents that Sentinels with a stale configuration (for example because they just rejoined from a partition) will try to change the slaves configuration before receiving an update. - -Also note how the semantics of always trying to impose the current configuration makes -the failover more resistant to partitions: +As we already specified, Sentinel also acts as a configuration provider for +clients that want to connect to a set of master and slaves. Because of +possible failovers or reconfigurations, clients have no idea about who is +the currently active master for a given set of instances, so Sentinel exports +an API to ask this question: -* Masters failed over are reconfigured as slaves when they return available. -* Slaves partitioned away during a partition are reconfigured once reachable. + 127.0.0.1:5000> SENTINEL get-master-addr-by-name mymaster + 1) "127.0.0.1" + 2) "6379" -Slave selection and priority +Testing the failover --- -When a Sentinel instance is ready to perform a failover, since the master -is in `ODOWN` state and the Sentinel received the authorization to failover -from the majority of the Sentinel instances known, a suitable slave needs -to be selected. - -The slave selection process evaluates the following information about slaves: - -1. Disconnection time from the master. -2. Slave priority. -3. Replication offset processed. -4. Run ID. 
- -A slave that is found to be disconnected from the master for more than ten -times the configured master timeout (down-after-milliseconds option), plus -the time the master is also not available from the point of view of the -Sentinel doing the failover, is considered to be not suitable for the failover -and is skipped. +At this point our toy Sentinel deployment is ready to be tested. We can +just kill our master and check if the configuration changes. To do so +we can just do: -In more rigorous terms, a slave whose the `INFO` output suggests to be -disconnected from the master for more than: + redis-cli -p 6379 DEBUG sleep 30 - (down-after-milliseconds * 10) + milliseconds_since_master_is_in_SDOWN_state +This command will make our master no longer reachable, sleeping for 30 seconds. +It basically simulates a master hanging for some reason. -Is considered to be unreliable and is disregarded entirely. - -The slave selection only considers the slaves that passed the above test, -and sorts it based on the above criteria, in the following order. +If you check the Sentinel logs, you should be able to see a lot of action: -1. The slaves are sorted by `slave-priority` as configured in the `redis.conf` file of the Redis instance. A lower priority will be preferred. -2. If the priority is the same, the replication offset processed by the slave is checked, and the slave that received more data from the master is selected. -3. If multiple slaves have the same priority and processed the same data from the master, a further check is performed, selecting the slave with the lexicographically smaller run ID. Having a lower run ID is not a real advantage for a slave, but is useful in order to make the process of slave selection more deterministic, instead of resorting to select a random slave. +1. Each Sentinel detects the master is down with an `+sdown` event. +2. 
This event is later escalated to `+odown`, which means that multiple Sentinels agree about the fact the master is not reachable. +3. Sentinels vote a Sentinel that will start the first failover attempt. +4. The failover happens. -Redis masters (that may be turned into slaves after a failover), and slaves, all -must be configured with a `slave-priority` if there are machines to be strongly -preferred. Otherwise all the instances can run with the default run ID (which -is the suggested setup, since it is far more interesting to select the slave -by replication offset). +If you ask again what is the current master address for `mymaster`, eventually +we should get a different reply this time: -A Redis instance can be configured with a special `slave-priority` of zero -in order to be **never selected** by Sentinels as the new master. -However a slave configured in this way will still be reconfigured by -Sentinels in order to replicate with the new master after a failover, the -only difference is that it will never become a master itself. + 127.0.0.1:5000> SENTINEL get-master-addr-by-name mymaster + 1) "127.0.0.1" + 2) "6380" -Sentinel and Redis authentication ---- - -When the master is configured to require a password from clients, -as a security measure, slaves need to also be aware of this password in -order to authenticate with the master and create the master-slave connection -used for the asynchronous replication protocol. - -This is achieved using the following configuration directives: - -* `requirepass` in the master, in order to set the authentication password, and to make sure the instance will not process requests for non authenticated clients. -* `masterauth` in the slaves in order for the slaves to authenticate with the master in order to correctly replicate data from it. 
- -When Sentinel is used, there is not a single master, since after a failover -slaves may play the role of masters, and old masters can be reconfigured in -order to act as slaves, so what you want to do is to set the above directives -in all your instances, both masters and slaves. - -This is also usually a logically sane setup since you don't want to protect -data only in the master, having the same data accessible in the slaves. - -However, in the uncommon case where you need a slave that is accessible -without authentication, you can still do it by setting up a slave priority -of zero (that will not allow the slave to be promoted to master), and -configuring only the `masterauth` directive for this slave, without -the `requirepass` directive, so that data will be readable by unauthenticated -clients. +So far so good... At this point you may jump to create your Sentinel deployment +or can read more to understand all the Sentinel commands and internals. Sentinel API === +Sentinel provides an API in order to inspect its state, check the health +of monitored masters and slaves, subscribe in order to receive specific +notifications, and change the Sentinel configuration at run time. + By default Sentinel runs using TCP port 26379 (note that 6379 is the normal Redis port). Sentinels accept commands using the Redis protocol, so you can use `redis-cli` or any other unmodified Redis client in order to talk with Sentinel. -There are two ways to talk with Sentinel: it is possible to directly query -it to check what is the state of the monitored Redis instances from its point -of view, to see what other Sentinels it knows, and so forth. - -An alternative is to use Pub/Sub to receive *push style* notifications from -Sentinels, every time some event happens, like a failover, or an instance -entering an error condition, and so forth. 
+It is possible to directly query a Sentinel to check what is the state of +the monitored Redis instances from its point of view, to see what other +Sentinels it knows, and so forth. Alternatively, using Pub/Sub, it is possible +to receive *push style* notifications from Sentinels, every time some event +happens, like a failover, or an instance entering an error condition, and +so forth. Sentinel commands --- -The following is a list of accepted commands: +The following is a list of accepted commands, not covering commands used in +order to modify the Sentinel configuration, which are covered later. * **PING** This command simply returns PONG. * **SENTINEL masters** Show a list of monitored masters and their state. * **SENTINEL master ``** Show the state and info of the specified master. * **SENTINEL slaves ``** Show a list of slaves for this master, and their state. +* **SENTINEL sentinels ``** Show a list of sentinel instances for this master, and their state. * **SENTINEL get-master-addr-by-name ``** Return the ip and port number of the master with that name. If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted slave. * **SENTINEL reset ``** This command will reset all the masters with matching name. The pattern argument is a glob-style pattern. The reset process clears any previous state in a master (including a failover in progress), and removes every slave and sentinel already discovered and associated with the master. * **SENTINEL failover ``** Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). +* **SENTINEL ckquorum ``** Check if the current Sentinel configuraiton is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. 
This command should be used in monitoring systems to check if a Sentinel deployment is ok. Reconfiguring Sentinel at Runtime --- @@ -517,15 +632,14 @@ about the first one before adding the next. This is useful in order to still guarantee that majority can be achieved only in one side of a partition, in the chance failures should happen in the process of adding new Sentinels. -This can be easily achieved by adding every new Sentinel with a 30 seconds delay, -and during absence of network partitions. +This can be easily achieved by adding every new Sentinel with a 30 seconds delay, and during absence of network partitions. At the end of the process it is possible to use the command `SENTINEL MASTER mastername` in order to check if all the Sentinels agree about the total number of Sentinels monitoring the master. -Removing a Sentinel is a bit more complex: Sentinels never forget already seen -Sentinels, even if they are not reachable for a long time, since we don't +Removing a Sentinel is a bit more complex: **Sentinels never forget already seen +Sentinels**, even if they are not reachable for a long time, since we don't want to dynamically change the majority needed to authorize a failover and the creation of a new configuration number. So in order to remove a Sentinel the following steps should be performed in absence of network partitions: @@ -563,7 +677,8 @@ channels and get notified about specific events. The channel name is the same as the name of the event. For instance the channel named `+sdown` will receive all the notifications related to instances -entering an `SDOWN` condition. +entering an `SDOWN` (SDOWN means the instance is no longer reachable from +the point of view of the Sentinel you are querying) condition. To get all the messages simply subscribe using `PSUBSCRIBE *`. @@ -603,6 +718,368 @@ and is only specified if the instance is not a master itself. * **+tilt** -- Tilt mode entered. * **-tilt** -- Tilt mode exited. 
+Handling of -BUSY state +--- + +The -BUSY error is returned by a Redis instance when a Lua script is running for +more time than the configured Lua script time limit. When this happens before +triggering a fail over Redis Sentinel will try to send a `SCRIPT KILL` +command, that will only succeed if the script was read-only. + +If the instance will still be in an error condition after this try, it will +eventually be failed over. + +Slaves priority +--- + +Redis instances have a configuration parameter called `slave-priority`. +This information is exposed by Redis slave instances in their `INFO` output, +and Sentinel uses it in order to pick a slave among the ones that can be +used in order to failover a master: + +1. If the slave priority is set to 0, the slave is never promoted to master. +2. Slaves with a *lower* priority number are prefredded by Sentinel. + +For example if there is a slave S1 in the same data center of the current +master, and another slave S2 in another data center, it is possible to set +S1 with a priority of 10 and S2 with a priority of 100, so that if the master +fails and both S1 and S2 are available, S1 will be preferred. + +For more information about the the way slaves are selected, please check the **slave selection and priority** section of this documentation. + +Sentinel and Redis authentication +--- + +When the master is configured to require a password from clients, +as a security measure, slaves need to also be aware of this password in +order to authenticate with the master and create the master-slave connection +used for the asynchronous replication protocol. + +This is achieved using the following configuration directives: + +* `requirepass` in the master, in order to set the authentication password, and to make sure the instance will not process requests for non authenticated clients. +* `masterauth` in the slaves in order for the slaves to authenticate with the master in order to correctly replicate data from it. 
+ +When Sentinel is used, there is not a single master, since after a failover +slaves may play the role of masters, and old masters can be reconfigured in +order to act as slaves, so what you want to do is to set the above directives +in all your instances, both masters and slaves. + +This is also usually a sane setup since you don't want to protect +data only in the master, having the same data accessible in the slaves. + +However, in the uncommon case where you need a slave that is accessible +without authentication, you can still do it by setting up **a slave priority +of zero**, to prevent this slave from being promoted to master, and +configuring in this slave only the `masterauth` directive, without +using the `requirepass` directive, so that data will be readable by +unauthenticated clients. + +Sentinel clients implementation +--- + +Sentinel requires explicit client support, unless the system is configured to execute a script that performs a transparent redirection of all the requests to the new master instance (virtual IP or other similar systems). The topic of client libraries implementation is covered in the document [Sentinel clients guidelines](/topics/sentinel-clients). + +More advanced concepts +=== + +In the following sections we'll cover a few details about how Sentinel work, +without to resorting to implementation details and algorithms that will be +covered in the final part of this document. + +SDOWN and ODOWN failure state +--- + +Redis Sentinel has two different concepts of *being down*, one is called +a *Subjectively Down* condition (SDOWN) and is a down condition that is +local to a given Sentinel instance. Another is called *Objectively Down* +condition (ODOWN) and is reached when enough Sentinels (at least the +number configured as the `quorum` parameter of the monitored master) have +an SDOWN condition, and get feedback from other Sentinels using +the `SENTINEL is-master-down-by-addr` command. 
+ +From the point of view of a Sentinel an SDOWN condition is reached when it +does not receive a valid reply to PING requests for the number of seconds +specified in the configuration as `is-master-down-after-milliseconds` +parameter. + +An acceptable reply to PING is one of the following: + +* PING replied with +PONG. +* PING replied with -LOADING error. +* PING replied with -MASTERDOWN error. + +Any other reply (or no reply at all) is considered non valid. +However note that **a logical master that advertises itself as a slave in +the INFO output is considered to be down**. + +Note that SDOWN requires that no acceptable reply is received for the whole +interval configured, so for instance if the interval is 30000 milliseconds +(30 seconds) and we receive an acceptable ping reply every 29 seconds, the +instance is considered to be working. + +SDOWN is not enough to trigger a failover: it only means a single Sentinel +believes a Redis instance is not available. To trigger a failover, the +ODOWN state must be reached. + +To switch from SDOWN to ODOWN no strong consensus algorithm is used, but +just a form of gossip: if a given Sentinel gets reports that a master +is not working from enough Sentinels **in a given time range**, the SDOWN is +promoted to ODOWN. If this acknowledge is later missing, the flag is cleared. + +A more strict authorization that uses an actual majority is required in +order to really start the failover, but no failover can be triggered without +reaching the ODOWN state. + +The ODOWN condition **only applies to masters**. For other kind of instances +Sentinel doesn't require to act, so the ODOWN state is never reached for slaves +and other sentinels, but only SDOWN is. + +However SDOWN has also semantical implications. For example a slave in SDOWN +state is not selected to be promoted by a Sentinel performing a failover. 
+ +Sentinels and Slaves auto discovery +--- + +Sentinels stay connected with other Sentinels in order to reciprocally +check the availability of each other, and to exchange messages. However you +don't need to configure a list of other Sentinel addresses in every Sentinel +instance you run, as Sentinel uses the Redis instances Pub/Sub capabilities +in order to discover the other Sentinels that are monitoring the same masters +and slaves. + +This feature is implemented by sending *hello messages* into the channel named +`__sentinel__:hello`. + +Similarly you don't need to configure what is the list of the slaves attached +to a master, as Sentinel will auto discover this list querying Redis. + +* Every Sentinel publishes a message to every monitored master and slave Pub/Sub channel `__sentinel__:hello`, every two seconds, announcing its presence with ip, port, runid. +* Every Sentinel is subscribed to the Pub/Sub channel `__sentinel__:hello` of every master and slave, looking for unknown sentinels. When new sentinels are detected, they are added as sentinels of this master. +* Hello messages also include the full current configuration of the master. If the receiving Sentinel has a configuration for a given master which is older than the one received, it updates to the new configuration immediately. +* Before adding a new sentinel to a master a Sentinel always checks if there is already a sentinel with the same runid or the same address (ip and port pair). In that case all the matching sentinels are removed, and the new added. + +Sentinel reconfiguration of instances outside the failover procedure. +--- + +Even when no failover is in progress, Sentinels will always try to set the +current configuration on monitored instances. Specifically: + +* Slaves (according to the current configuration) that claim to be masters, will be configured as slaves to replicate with the current master. 
+* Slaves connected to a wrong master, will be reconfigured to replicate with the right master. + +For Sentinels to reconfigure slaves, the wrong configuration must be observed for some time, that is greater than the period used to broadcast new configurations. + +This prevents Sentinels with a stale configuration (for example because they just rejoined from a partition) will try to change the slaves configuration before receiving an update. + +Also note how the semantics of always trying to impose the current configuration makes the failover more resistant to partitions: + +* Masters failed over are reconfigured as slaves when they return available. +* Slaves partitioned away during a partition are reconfigured once reachable. + +The important lesson to remember about this section is: **Sentinel is a system where each process will always try to impose the last logical configuration to the set of monitored instances**. + +Slave selection and priority +--- + +When a Sentinel instance is ready to perform a failover, since the master +is in `ODOWN` state and the Sentinel received the authorization to failover +from the majority of the Sentinel instances known, a suitable slave needs +to be selected. + +The slave selection process evaluates the following information about slaves: + +1. Disconnection time from the master. +2. Slave priority. +3. Replication offset processed. +4. Run ID. + +A slave that is found to be disconnected from the master for more than ten +times the configured master timeout (down-after-milliseconds option), plus +the time the master is also not available from the point of view of the +Sentinel doing the failover, is considered to be not suitable for the failover +and is skipped. + +In more rigorous terms, a slave whose the `INFO` output suggests to be +disconnected from the master for more than: + + (down-after-milliseconds * 10) + milliseconds_since_master_is_in_SDOWN_state + +Is considered to be unreliable and is disregarded entirely. 
+ +The slave selection only considers the slaves that passed the above test, +and sorts it based on the above criteria, in the following order. + +1. The slaves are sorted by `slave-priority` as configured in the `redis.conf` file of the Redis instance. A lower priority will be preferred. +2. If the priority is the same, the replication offset processed by the slave is checked, and the slave that received more data from the master is selected. +3. If multiple slaves have the same priority and processed the same data from the master, a further check is performed, selecting the slave with the lexicographically smaller run ID. Having a lower run ID is not a real advantage for a slave, but is useful in order to make the process of slave selection more deterministic, instead of resorting to select a random slave. + +Redis masters (that may be turned into slaves after a failover), and slaves, all +must be configured with a `slave-priority` if there are machines to be strongly +preferred. Otherwise all the instances can run with the default run ID (which +is the suggested setup, since it is far more interesting to select the slave +by replication offset). + +A Redis instance can be configured with a special `slave-priority` of zero +in order to be **never selected** by Sentinels as the new master. +However a slave configured in this way will still be reconfigured by +Sentinels in order to replicate with the new master after a failover, the +only difference is that it will never become a master itself. + +Algorithms and internals +=== + +In the following sections we will explore the details of Sentinel behavior. +It is not strictly needed for users to be aware of all the details, but a +deep understanding of Sentinel may help to deploy and operate Sentinel in +a more effective way. + +Quorum +--- + +The previous sections showed that every master monitored by Sentinel is associated to a configured **quorum**. 
It specifies the number of Sentinel processes +that need to agree about the unreachability or error condition of the master in +order to trigger a failover. + +However, after the failover is triggered, in order for the failover to actually be performed, **at least a majority of Sentinels must authorize the Sentinel to +failover**. Sentinel never performs a failover in the partition where a +minority of Sentinels exist. + +Let's try to make things a bit more clear: + +* Quorum: the number of Sentinel processes that need to detect an error condition in order for a master to be flagged as **ODOWN**. +* The failover is triggered by the **ODOWN** state. +* Once the failover is triggered, the Sentinel trying to failover is required to ask for authorization to a majority of Sentinels (or more than the majority if the quorum is set to a number greater than the majority). + +The difference may seem subtle but is actually quite simple to understand and use. For example if you have 5 Sentinel instances, and the quorum is set to 2, a failover will be triggered as soon as 2 Sentinels believe that the master is not reachable, however one of the two Sentinels will be able to failover only if it gets authorization at least from 3 Sentinels. + +If instead the quorum is configured to 5, all the Sentinels must agree about the master error condition, and the authorization from all Sentinels is required in order to failover. + +This means that the quorum can be used to tune Sentinel in two ways: + +1. If a the quorum is set to a value smaller than the majority of Sentinels we deploy, we are basically making Sentinel more sensible to master failures, triggering a failover as soon as even just a minority of Sentinels is no longer able to talk with the master. +2. 
If a quorum is set to a value greater than the majority of Sentinels, we are making Sentinel able to failover only when there are a very large number (larger than majority) of well connected Sentinels which agree about the master being down. + +Configuration epochs +--- + +Sentinels require to get authorizations from a majority in order to start a +failover for a few important reasons: + +When a Sentinel is authorized, it gets a unique **configuration epoch** for the master it is failing over. This is a number that will be used to version the new configuration after the failover is completed. Because a majority agreed that a given version was assigned to a given Sentinel, no other Sentinel will be able to use it. This means that every configuration of every failover is versioned with a unique version. We'll see why this is so important. + +Moreover Sentinels have a rule: if a Sentinel voted another Sentinel for the failover of a given master, it will wait some time to try to failover the same master again. This delay is the `failover-timeout` you can configure in `sentinel.conf`. This means that Sentinels will not try to failover the same master at the same time, the first to ask to be authorized will try, if it fails another will try after some time, and so forth. + +Redis Sentinel guarantees the *liveness* property that if a majority of Sentinels are able to talk, eventually one will be authorized to failover if the master is down. + +Redis Sentinel also guarantees the *safety* property that every Sentinel will failover the same master using a different *configuration epoch*. + +Configuration propagation +--- + +Once a Sentinel is able to failover a master successfully, it will start to broadcast the new configuration so that the other Sentinels will update their information about a given master. 
+ +For a failover to be considered successful, it requires that the Sentinel was able to send the `SLAVEOF NO ONE` command to the selected slave, and that the switch to master was later observed in the `INFO` output of the master. + +At this point, even if the reconfiguration of the slaves is in progress, the failover is considered to be successful, and all the Sentinels are required to start reporting the new configuration. + +The way a new configuration is propagated is the reason why we need that every +Sentinel failover is authorized with a different version number (configuration epoch). + +Every Sentinel continuously broadcast its version of the configuration of a master using Redis Pub/Sub messages, both in the master and all the slaves. At the same time all the Sentinels wait for messages to see what is the configuration +advertised by the other Sentinels. + +Configurations are broadcast in the `__sentinel__:hello` Pub/Sub channel. + +Because every configuration has a different version number, the greater version +always wins over smaller versions. + +So for example the configuration for the master `mymaster` start with all the +Sentinels believing the master is at 192.168.1.50:6379. This configuration +has version 1. After some time a Sentinel is authorized to failover with version 2. If the failover is successful, it will start to broadcast a new configuration, let's say 192.168.1.50:9000, with version 2. All the other instances will see this configuration and will update their configuration accordingly, since the new configuration has a greater version. + +This means that Sentinel guarantees a second liveness property: a set of +Sentinels that are able to communicate will all converge to the same configuration with the higher version number. + +Basically if the net is partitioned, every partition will converge to the higher +local configuration. 
In the special case of no partitions, there is a single +partition and every Sentinel will agree about the configuration. + +Consistency under partitions +--- + +Redis Sentinel configurations are eventually consistent, so every partition will +converge to the higher configuration available. +However in a real-world system using Sentinel there are three different players: + +* Redis instances. +* Sentinel instances. +* Clients. + +In order to define the behavior of the system we have to consider all three. + +The following is a simple network where there are 3 nodes, each running +a Redis instance, and a Sentinel instance: + + +-------------+ + | Sentinel 1 |----- Client A + | Redis 1 (M) | + +-------------+ + | + | + +-------------+ | +------------+ + | Sentinel 2 |-----+-- // ----| Sentinel 3 |----- Client B + | Redis 2 (S) | | Redis 3 (M)| + +-------------+ +------------+ + +In this system the original state was that Redis 3 was the master, while +Redis 1 and 2 were slaves. A partition occurred isolating the old master. +Sentinels 1 and 2 started a failover promoting Sentinel 1 as the new master. + +The Sentinel properties guarantee that Sentinel 1 and 2 now have the new +configuration for the master. However Sentinel 3 has still the old configuration +since it lives in a different partition. + +We know that Sentinel 3 will get its configuration updated when the network +partition will heal, however what happens during the partition if there +are clients partitioned with the old master? + +Clients will be still able to write to Redis 3, the old master. When the +partition will rejoin, Redis 3 will be turned into a slave of Redis 1, and +all the data written during the partition will be lost. + +Depending on your configuration you may want or not that this scenario happens: + +* If you are using Redis as a cache, it could be handy that Client B is still able to write to the old master, even if its data will be lost. 
+* If you are using Redis as a store, this is not good and you need to configure the system in order to partially prevent this problem. + +Since Redis is asynchronously replicated, there is no way to totally prevent data loss in this scenario, however you can bound the divergence between Redis 3 and Redis 1 +using the following Redis configuration option: + + min-slaves-to-write 1 + min-slaves-max-lag 10 + +With the above configuration (please see the self-commented `redis.conf` example in the Redis distribution for more information) a Redis instance, when acting as a master, will stop accepting writes if it can't write to at least 1 slave. Since replication is asynchronous *not being able to write* actually means that the slave is either disconnected, or is not sending us asynchronous acknowledges for more than the specified `max-lag` number of seconds. + +Using this configuration the Redis 3 in the above example will become unavailable after 10 seconds. When the partition heals, the Sentinel 3 configuration will converge to +the new one, and Client B will be able to fetch a valid configuration and continue. + +In general Redis + Sentinel as a whole are a an **eventually consistent system** where the merge function is **last failover wins**, and the data from old masters are discarded to replicate the data of the current master, so there is always a window for losing acknowledged writes. This is due to Redis asynchronous +replication and the discarding nature of the "virtual" merge function of the system. Note that this is not a limitation of Sentinel itself, and if you orchestrate the failover with a strongly consistent replicated state machine, the same properties will still apply. There are only two ways to avoid losing acknowledged writes: + +1. Use synchronous replication (and a proper consensus algorithm to run a replicated state machine). +2. Use an eventually consistent system where different versions of the same object can be merged. 
+ +Redis currently is not able to use any of the above systems, and is currently outside the development goals. However there are proxies implementing solution "2" on top of Redis stores such as SoundClound [Roshi](https://github.com/soundcloud/roshi), or Netflix [Dynomite](https://github.com/Netflix/dynomite). + +Sentinel persistent state +--- + +Sentinel state is persisted in the sentinel configuration file. For example +every time a new configuration is received, or created (leader Sentinels), for +a master, the configuration is persisted on disk together with the configuration +epoch. This means that it is safe to stop and restart Sentinel processes. + TILT mode --- @@ -633,17 +1110,7 @@ When in TILT mode the Sentinel will continue to monitor everything, but: If everything appears to be normal for 30 second, the TILT mode is exited. -Handling of -BUSY state ---- - -(Warning: Yet not implemented) - -The -BUSY error is returned when a script is running for more time than the -configured script time limit. When this happens before triggering a fail over -Redis Sentinel will try to send a "SCRIPT KILL" command, that will only -succeed if the script was read-only. - -Sentinel clients implementation ---- - -Sentinel requires explicit client support, unless the system is configured to execute a script that performs a transparent redirection of all the requests to the new master instance (virtual IP or other similar systems). The topic of client libraries implementation is covered in the document [Sentinel clients guidelines](/topics/sentinel-clients). +Note that in some way TILT mode could be replaced using the monotonic clock +API that many kernels offer. However it is not still clear if this is a good +solution since the current system avoids issues in case the process is just +suspended or not executed by the scheduler for a long time. 
From 21fe63a8c0064a5c7a4c88f3b449c014b346e414 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 20 May 2015 11:21:52 +0200 Subject: [PATCH 0283/2314] Fix typo in Sentinel doc. --- topics/sentinel.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index dd0744ec48..14a46c38de 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -2,10 +2,10 @@ Redis Sentinel Documentation === Redis Sentinel provides high availability for Redis. In practical terms this -means that using Senitnel you can create a Redis deployment that resists +means that using Sentinel you can create a Redis deployment that resists without human intervention to certian kind of failures. -Redis Senitnel also provides other collateral tasks such as monitoring, +Redis Sentinel also provides other collateral tasks such as monitoring, notifications and acts as a configuration provider for clients. This is the full list of Sentinel capabilities at a macroscopical level (i.e. the *big picture*): From 3ce62528c5093a326a1be551b665b9160c4e9c2a Mon Sep 17 00:00:00 2001 From: Rocky Madden Date: Wed, 20 May 2015 23:55:14 -0600 Subject: [PATCH 0284/2314] Fixed small typo. --- topics/persistence.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/persistence.md b/topics/persistence.md index 81c73a5289..33b55d269f 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -51,7 +51,7 @@ The general indication is that you should use both persistence methods if you want a degree of data safety comparable to what PostgreSQL can provide you. If you care a lot about your data, but still can live with a few minutes of -data lose in case of disasters, you can simply use RDB alone. +data loss in case of disasters, you can simply use RDB alone. 
There are many users using AOF alone, but we discourage it since to have an RDB snapshot from time to time is a great idea for doing database backups, From 74568cd5592f82132e534e0b1b999dbe26727889 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 25 May 2015 12:13:38 +0200 Subject: [PATCH 0285/2314] Sentinel flushconfig command documented. --- topics/sentinel.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/sentinel.md b/topics/sentinel.md index 14a46c38de..be8a3f7499 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -595,6 +595,7 @@ order to modify the Sentinel configuration, which are covered later. * **SENTINEL reset ``** This command will reset all the masters with matching name. The pattern argument is a glob-style pattern. The reset process clears any previous state in a master (including a failover in progress), and removes every slave and sentinel already discovered and associated with the master. * **SENTINEL failover ``** Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). * **SENTINEL ckquorum ``** Check if the current Sentinel configuraiton is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. This command should be used in monitoring systems to check if a Sentinel deployment is ok. +* **SENTINEL flushconfig** Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. Normally Sentinel rewrites the configuration every time something changes in its state (in the context of the subset of the state which is persisted on disk across restart). However sometimes it is possible that the configuration file is lost because of operation errors, disk failures, package upgrade scripts or configuration managers. 
In those cases a way to to force Sentinel to rewrite the configuration file is handy. This command works even if the previous configuration file is completely missing. Reconfiguring Sentinel at Runtime --- From c93d9c35de723806f9c083f64ac3169f49132e95 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 21:16:45 +0200 Subject: [PATCH 0286/2314] =?UTF-8?q?and=20=E2=86=92=20any?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index ffe8e646e1..02359729ca 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -357,7 +357,7 @@ I'm aware of the following implementations: * [thunk-redis](https://github.com/thunks/thunk-redis) offers support for Node.js and io.js, it is a thunk/promise-based redis client with pipelining and cluster. * The `redis-cli` utility in the unstable branch of the Redis repository at GitHub implements a very basic cluster support when started with the `-c` switch. -An easy way to test Redis Cluster is either to try and of the above clients +An easy way to test Redis Cluster is either to try any of the above clients or simply the `redis-cli` command line utility. 
The following is an example of interaction using the latter: From 98a29883fffbf6d66fe26ad2abb51d62696caae3 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 21:19:04 +0200 Subject: [PATCH 0287/2314] Update redis-benchmark help output --- topics/benchmarks.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/topics/benchmarks.md b/topics/benchmarks.md index ee31ff86ea..584e43dd0a 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -12,22 +12,23 @@ The following options are supported: -h Server hostname (default 127.0.0.1) -p Server port (default 6379) -s Server socket (overrides host and port) + -a Password for Redis Auth -c Number of parallel connections (default 50) - -n Total number of requests (default 10000) + -n Total number of requests (default 100000) -d Data size of SET/GET value in bytes (default 2) + -dbnum SELECT the specified db number (default 0) -k 1=keep alive 0=reconnect (default 1) -r Use random keys for SET/GET/INCR, random values for SADD - Using this option the benchmark will get/set keys - in the form mykey_rand:000000012456 instead of constant - keys, the argument determines the max - number of values for the random number. For instance - if set to 10 only rand:000000000000 - rand:000000000009 - range will be allowed. + Using this option the benchmark will expand the string __rand_int__ + inside an argument with a 12 digits number in the specified range + from 0 to keyspacelen-1. The substitution changes every time a command + is executed. Default tests use this to hit random keys in the + specified range. -P Pipeline requests. Default 1 (no pipeline). -q Quiet. Just show query/sec values --csv Output in CSV format -l Loop. Run the tests forever - -t Only run the comma-separated list of tests. The test + -t Only run the comma separated list of tests. The test names are the same as the ones produced as output. -I Idle mode. Just open N idle connections and wait. 
@@ -124,7 +125,7 @@ in account. + Redis is a server: all commands involve network or IPC round trips. It is meaningless to compare it to embedded data stores such as SQLite, Berkeley DB, -Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is +Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is primarily in network/protocol management. + Redis commands return an acknowledgment for all usual commands. Some other data stores do not (for instance MongoDB does not implicitly acknowledge write From 9d47670a5dad7d77b268f2d4ed4578b154591be8 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 21:36:04 +0200 Subject: [PATCH 0288/2314] Move comma --- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 02359729ca..ba1842412c 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -576,8 +576,8 @@ line like the following: ./redis-trib.rb reshard : --from --to --slots --yes -This allows to build some automatism if you are likely to reshard often -however currently, there is no way for `redis-trib` to automatically +This allows to build some automatism if you are likely to reshard often, +however currently there is no way for `redis-trib` to automatically rebalance the cluster checking the distribution of keys across the cluster nodes and intelligently moving slots as needed. This feature will be added in the future. 
From ac11c5fbfa8e4fee2732e63e82ce238e96266466 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 21:52:10 +0200 Subject: [PATCH 0289/2314] Use correct markdown syntax for link --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index ba1842412c..21927f09ac 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -903,7 +903,7 @@ master to another one automatically, without the help of the system administrato The automatic reconfiguration of replicas is called *replicas migration* and is able to improve the reliability of a Redis Cluster. -Note: you can read the details of replicas migration in the (Redis Cluster Specification)[/topics/cluster-spec], here we'll only provide some information about the +Note: you can read the details of replicas migration in the [Redis Cluster Specification](/topics/cluster-spec), here we'll only provide some information about the general idea and what you should do in order to benefit from it. The reason why you may want to let your cluster replicas to move from one master From a24a9f49a1637606979a9af40f1aa4a85878b88d Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 21:55:19 +0200 Subject: [PATCH 0290/2314] Use correct config option name --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 21927f09ac..b30d538acd 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -938,7 +938,7 @@ So what you should know about replicas migration in short? * The cluster will try to migrate a replica from the master that has the greatest number of replicas in a given moment. * To benefit from replica migration you have just to add a few more replicas to a single master in your cluster, it does not matter what master. 
-* There is a configuration parameter that controls the replica migration feature that is called `replica-migration-barrier`: you can read more about it in the example `redis.conf` file provided with Redis Cluster. +* There is a configuration parameter that controls the replica migration feature that is called `cluster-migration-barrier`: you can read more about it in the example `redis.conf` file provided with Redis Cluster. Upgrading nodes in a Redis Cluster --- From 0d0cfbf1e76f62d9d3b76cd7db159d23c361017a Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 22:25:01 +0200 Subject: [PATCH 0291/2314] Slaves processes data from a master --- commands/cluster failover.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster failover.md b/commands/cluster failover.md index 6c1af23f54..8e2ff5ab33 100644 --- a/commands/cluster failover.md +++ b/commands/cluster failover.md @@ -8,7 +8,7 @@ without any window for data loss. It works in the following way: 1. The slave tells the master to stop processing queries from clients. 2. The master replies to the slave with the current *replication offset*. -3. The slave waits for the replication offset to match on its side, to make sure it processed all the data from the slave before to continue. +3. The slave waits for the replication offset to match on its side, to make sure it processed all the data from the master before it continues. 4. The slave starts a failover, obtains a new configuration epoch from the majority of the masters, and broadcast the new configuration. 5. The old master receives the configuration update: unblocks its clients and start replying with redirection messages so that they'll continue the chat with the new master. 
From d8e55317ee549921bc8ac3b0d9f18864371251e8 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 26 May 2015 22:27:08 +0200 Subject: [PATCH 0292/2314] =?UTF-8?q?For=20=E2=86=92=20The?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- commands/cluster failover.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/cluster failover.md b/commands/cluster failover.md index 8e2ff5ab33..f6c97848c8 100644 --- a/commands/cluster failover.md +++ b/commands/cluster failover.md @@ -37,7 +37,7 @@ for this is to mass promote slaves in a different data center to masters in order to perform a data center switch, while all the masters are down or partitioned away. -For **TAKEOVER** option implies everything **FORCE** implies, but also does +The **TAKEOVER** option implies everything **FORCE** implies, but also does not uses any cluster authorization in order to failover. A slave receiving `CLUSTER FAILOVER TAKEOVER` will instead: From 146214723ab8a80446b8070d737b7e93634279e5 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 27 May 2015 11:02:23 +0200 Subject: [PATCH 0293/2314] WAIT command documented. 
--- commands.json | 16 ++++++++++++++ commands/wait.md | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 commands/wait.md diff --git a/commands.json b/commands.json index 78efa036c2..00047722d1 100644 --- a/commands.json +++ b/commands.json @@ -2262,6 +2262,22 @@ "since": "2.2.0", "group": "transactions" }, + "WAIT": { + "summary": "Wait for the synchronous replication of all the write commands sent in the context of the current connection", + "complexity": "O(1)", + "arguments": [ + { + "name": "numslaves", + "type": "integer" + }, + { + "name": "timeout", + "type": "integer" + } + ], + "since": "3.0.0", + "group": "generic" + }, "WATCH": { "summary": "Watch the given keys to determine execution of the MULTI/EXEC block", "complexity": "O(1) for every key.", diff --git a/commands/wait.md b/commands/wait.md new file mode 100644 index 0000000000..65722c26fa --- /dev/null +++ b/commands/wait.md @@ -0,0 +1,57 @@ +This command blocks the current client until all the previous write commands +are successfully transferred and acknowledged by at least the specified number +of slaves. If the timeout, specified in milliseconds, is reached, the command +returns even if the specified number of slaves were not yet reached. + +The command **will always return** the number of slaves that acknowledged +the write commands sent before the `WAIT` command, both in the case where +the specified number of slaves are reached, or when the timeout is reached. + +A few remarks: + +1. When `WAIT` returns, all the previous write commands sent in the context of the current connection are guaranteed to be received by the number of slaves returned by `WAIT`. +2. If the command is sent as part of a `MULTI` transaction, the command does not block but instead just return ASAP the number of slaves that acknowledged the previous write commands. +3. A timeout of 0 means to block forever. +4. 
Since `WAIT` returns the number of slaves reached both in case of failure and success, the client should check that the returned value is equal or greater to the replication level it demanded. + +Consistency and WAIT +--- + +Note that `WAIT` does not make Redis a strongly consistent store: while synchronous replication is part of a replicated state machine, it is not the only thing needed. However in the context of Sentinel or Redis Cluster failover, `WAIT` improves the real world data safety. + +Specifically if a given write is transferred to one or more slaves, it is more likely (but not guaranteed) that if the master fails, we'll be able to promote, during a failover, a slave that received the write: both Sentinel and Redis Cluster will do a best-effort attempt to promote the best slave among the set of available slaves. + +However this is just a best-effort attempt so it is possible to still lose a write synchronously replicated to multiple slaves. + +Implementation details +--- + +Since the introduction of partial resynchronization with slaves (PSYNC feature) +Redis slaves asynchronously ping their master with the offset they already +processed in the replication stream. This is used in multiple ways: + +1. Detect timed out slaves. +2. Perform a partial resynchronization after a disconnection. +3. Implement `WAIT`. + +In the specific case of the implementation of `WAIT`, Redis remembers, for each client, the replication offset of the produced replication stream when a given +write command was executed in the context of a given client. When `WAIT` is +called Redis checks if the specified number of slaves already acknowledged +this offset or a greater one. + +@return + +@integer-reply: The command returns the number of slaves reached by all the writes performed in the context of the current connection. 
+ +@examples + +``` +> SET foo bar +OK +> WAIT 1 0 +(integer) 1 +> WAIT 2 1000 +(integer) 1 +``` + +In the following example the first call to `WAIT` does not use a timeout and asks for the write to reach 1 slave. It returns with success. In the second attempt instead we put a timeout, and ask for the replication of the write to two slaves. Since there is a single slave available, after one second `WAIT` unblocks and returns 1, the number of slaves reached. From 2dac658a4990d6c06f9132cbe6452973ec7e368b Mon Sep 17 00:00:00 2001 From: Mike Perham Date: Wed, 27 May 2015 15:50:15 -0700 Subject: [PATCH 0294/2314] Clean up some english Some editing and corrections from a native speaker. I didn't edit everything but just a few quirks I saw. --- commands/eval.md | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 97f506f938..3bb64869fc 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -37,12 +37,12 @@ functions: * `redis.pcall()` `redis.call()` is similar to `redis.pcall()`, the only difference is that if a -Redis command call will result into an error, `redis.call()` will raise a Lua +Redis command call will result in an error, `redis.call()` will raise a Lua error that in turn will force `EVAL` to return an error to the command caller, -while `redis.pcall` will trap the error returning a Lua table representing the +while `redis.pcall` will trap the error and return a Lua table representing the error. -The arguments of the `redis.call()` and `redis.pcall()` functions are simply all +The arguments of the `redis.call()` and `redis.pcall()` functions are all the arguments of a well formed Redis command: ``` @@ -50,29 +50,27 @@ the arguments of a well formed Redis command: OK ``` -The above script actually sets the key `foo` to the string `bar`. +The above script sets the key `foo` to the string `bar`. 
However it violates the `EVAL` command semantics as all the keys that the script -uses should be passed using the KEYS array, in the following way: +uses should be passed using the KEYS array: ``` > eval "return redis.call('set',KEYS[1],'bar')" 1 foo OK ``` -The reason for passing keys in the proper way is that, before `EVAL` all the -Redis commands could be analyzed before execution in order to establish what -keys the command will operate on. - -In order for this to be true for `EVAL` also keys must be explicit. -This is useful in many ways, but especially in order to make sure Redis Cluster -is able to forward your request to the appropriate cluster node (Redis Cluster +All Redis commands must be analyzed before execution to determine which +keys the command will operate on. In order for this to be true for `EVAL`, keys must be passed explicitly. +This is useful in many ways, but especially to make sure Redis Cluster +can forward your request to the appropriate cluster node (Redis Cluster is a work in progress, but the scripting feature was designed in order to play well with it). -However this rule is not enforced in order to provide the user with + +Note this rule is not enforced in order to provide the user with opportunities to abuse the Redis single instance configuration, at the cost of writing scripts not compatible with Redis Cluster. -Lua scripts can return a value, that is converted from the Lua type to the Redis +Lua scripts can return a value that is converted from the Lua type to the Redis protocol using a set of conversion rules. ## Conversion between Lua and Redis data types @@ -85,7 +83,7 @@ client. This conversion between data types is designed in a way that if a Redis type is converted into a Lua type, and then the result is converted back into a Redis -type, the result is the same as of the initial value. +type, the result is the same as the initial value. In other words there is a one-to-one conversion between Lua and Redis types. 
The following table shows you all the conversions rules: @@ -166,19 +164,19 @@ There is no difference between using the helper functions or directly returning Redis uses the same Lua interpreter to run all the commands. Also Redis guarantees that a script is executed in an atomic way: no other script or Redis command will be executed while a script is being executed. -This semantics is very similar to the one of `MULTI` / `EXEC`. +This semantic is similar to the one of `MULTI` / `EXEC`. From the point of view of all the other clients the effects of a script are either still not visible or already completed. However this also means that executing slow scripts is not a good idea. It is not hard to create fast scripts, as the script overhead is very low, but if you are going to use slow scripts you should be aware that while the script -is running no other client can execute commands since the server is busy. +is running no other client can execute commands. ## Error handling As already stated, calls to `redis.call()` resulting in a Redis command error -will stop the execution of the script and will return the error, in a way that +will stop the execution of the script and return an error, in a way that makes it obvious that the error was generated by a script: ``` @@ -190,7 +188,7 @@ makes it obvious that the error was generated by a script: (error) ERR Error running script (call to f_6b1bf486c81ceb7edf3c093f4c48582e38c0e791): ERR Operation against a key holding the wrong kind of value ``` -Using the `redis.pcall()` command no error is raised, but an error object is +Using `redis.pcall()` no error is raised, but an error object is returned in the format specified above (as a Lua table with an `err` field). The script can pass the exact error to the user by returning the error object returned by `redis.pcall()`. @@ -205,13 +203,13 @@ be optimal in many contexts. 
On the other hand, defining commands using a special command or via `redis.conf` would be a problem for a few reasons: -* Different instances may have different versions of a command implementation. +* Different instances may have different implementations of a command. -* Deployment is hard if there is to make sure all the instances contain a +* Deployment is hard if we have to make sure all instances contain a given command, especially in a distributed environment. -* Reading an application code the full semantic could not be clear since the - application would call commands defined server side. +* Reading application code, the complete semantics might not be clear since the + application calls commands defined server side. In order to avoid these problems while avoiding the bandwidth penalty, Redis implements the `EVALSHA` command. From c296442a911cc9b70014bd369a8aa337c276b5c2 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 May 2015 12:40:13 +0200 Subject: [PATCH 0295/2314] ZADD doc updated --- commands.json | 6 ++++++ commands/zadd.md | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/commands.json b/commands.json index 00047722d1..79d297a56a 100644 --- a/commands.json +++ b/commands.json @@ -2299,6 +2299,12 @@ "name": "key", "type": "key" }, + { + "name": "options", + "type": "enum", + "enum": ["NX","XX","CH","INCR"], + "optional": true + }, { "name": ["score", "member"], "type": ["double", "string"], diff --git a/commands/zadd.md b/commands/zadd.md index f6dab260e6..b145dfbe6f 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -10,6 +10,17 @@ members is created, like if the sorted set was empty. If the key exists but does The score values should be the string representation of a double precision floating point number. `+inf` and `-inf` values are valid values as well. +ZADD options (Redis 3.0.2 or greater) +--- + +ZADD supports a list of options, specified after the name of the key and before +the first score argument. 
Options are: + +* **XX**: Only update elements that already exist. Never add elements. +* **NX**: Don't update already existing elements. Always add new elements. +* **CH**: Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of *changed*). Changed elements are **new elements added** and elements already existing for which **the score was updated**. So elements specified in the command line having the same score as they had in the past are not counted. Note: normally the return value of `ZADD` only counts the number of new elements added. +* **INCR**: When this option is specified `ZADD` acts like `ZINCRBY`. Only one score-element pair can be specified in this mode. + Range of integer scores that can be expressed precisely --- From cc23cf66a38841d2c13cde8e2a245e3fcccfcc2f Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 1 Jun 2015 15:56:38 +0200 Subject: [PATCH 0296/2314] Uppercase HyperLogLog --- commands/pfcount.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/pfcount.md b/commands/pfcount.md index 019f51b12f..967a405787 100644 --- a/commands/pfcount.md +++ b/commands/pfcount.md @@ -1,6 +1,6 @@ When called with a single key, returns the approximated cardinality computed by the HyperLogLog data structure stored at the specified variable, which is 0 if the variable does not exist. -When called with multiple keys, returns the approximated cardinality of the union of the HyperLogLogs passed, by internally merging the HyperLogLogs stored at the provided keys into a temporary hyperLogLog. +When called with multiple keys, returns the approximated cardinality of the union of the HyperLogLogs passed, by internally merging the HyperLogLogs stored at the provided keys into a temporary HyperLogLog. 
The HyperLogLog data structure can be used in order to count **unique** elements in a set using just a small constant amount of memory, specifically 12k bytes for every HyperLogLog (plus a few bytes for the key itself). From 1748d8b32ad4a96477c11610e9458d4313b1d8eb Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 1 Jun 2015 16:04:54 +0200 Subject: [PATCH 0297/2314] Fixed typos --- topics/sentinel.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index be8a3f7499..c9c4d8bb41 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -3,7 +3,7 @@ Redis Sentinel Documentation Redis Sentinel provides high availability for Redis. In practical terms this means that using Sentinel you can create a Redis deployment that resists -without human intervention to certian kind of failures. +without human intervention to certain kind of failures. Redis Sentinel also provides other collateral tasks such as monitoring, notifications and acts as a configuration provider for clients. @@ -28,8 +28,8 @@ Sentinel itself is designed to run in a configuration where there are multiple S The sum of Sentinels, Redis instances (masters and slaves) and clients connecting to Sentinel and Redis, are also a larger distributed system with specific properties. In this document concepts will be introduced gradually -starting from basic informations needed in order to understand the basic -properties of Sentinel, to more complex informations (that are optional) in +starting from basic information needed in order to understand the basic +properties of Sentinel, to more complex information (that are optional) in order to understand how exactly Sentinel works. Quick Start @@ -45,7 +45,7 @@ algorithms (that are explained in this documentation). A stable release of Redis Sentinel is shipped with Redis 2.8 and 3.0, which are the two latest stable releases of Redis. 
-New developments are performed in the *unstable* branch, and new features +New developments are performed in the *unstable* branch, and new features sometimes are back ported into the 2.8 and 3.0 branch as soon as they are considered to be stable. @@ -127,7 +127,7 @@ that is at address 127.0.0.1 and port 6379, with a quorum of 2. Everything is pretty obvious but the **quorum** argument: * The **quorum** is the number of Sentinels that need to agree about the fact the master is not reachable, in order for really mark the slave as failing, and eventually start a fail over procedure if possible. -* However **the quorum is only used to detect the failure**. In order to actually perform a failover, one of the Sentinels need to be elected leader for the failover and be authorized to proceed. This only happens with the vote of the **majority of the Sentienl processes**. +* However **the quorum is only used to detect the failure**. In order to actually perform a failover, one of the Sentinels need to be elected leader for the failover and be authorized to proceed. This only happens with the vote of the **majority of the Sentinel processes**. So for example if you have 5 Sentinel processes, and the quorum for a given master set to the value of 2, this is what happens: @@ -135,7 +135,7 @@ master set to the value of 2, this is what happens: * If two Sentinels agree at the same time about the master being unreachable, one of the two will try to start a failover. * If there are at least a total of three Sentinels reachable, the failover will be authorized and will actually start. -In practical terms this means during failures **Sentinel never starts a failover if the majority of Setinel processes are unable to talk** (aka no failover in the minority partition). +In practical terms this means during failures **Sentinel never starts a failover if the majority of Sentinel processes are unable to talk** (aka no failover in the minority partition). 
Other Sentinel options --- @@ -177,7 +177,7 @@ format, this is what the different symbols means: +--------------------+ | This is a computer | | or VM that fails | - | indepenedently. We | + | independently. We | | call it a "box" | +--------------------+ @@ -236,7 +236,7 @@ In the above configuration we created two masters (assuming S2 could failover without authorization) in a perfectly symmetrical way. Clients may write indefinitely to both sides, and there is no way to understand when the partition heals what configuration is the right one, in order to prevent -a *permanent split brian condition*. +a *permanent split brain condition*. So please **deploy at least three Sentinels in three different boxes** always. @@ -365,7 +365,7 @@ case we need to resort to a mixed setup like the following: | +------+-----+ | | - | | + | | +----+ +----+ | C1 | | C2 | | S3 | | S4 | @@ -516,7 +516,7 @@ two commands: SENTINEL slaves mymaster SENTINEL sentinels mymaster -The first will provide similar informations about the slaves connected to the +The first will provide similar information about the slaves connected to the master, and the second about the other Sentinels. Obtaining the address of the current master @@ -594,7 +594,7 @@ order to modify the Sentinel configuration, which are covered later. * **SENTINEL get-master-addr-by-name ``** Return the ip and port number of the master with that name. If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted slave. * **SENTINEL reset ``** This command will reset all the masters with matching name. The pattern argument is a glob-style pattern. The reset process clears any previous state in a master (including a failover in progress), and removes every slave and sentinel already discovered and associated with the master. 
* **SENTINEL failover ``** Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). -* **SENTINEL ckquorum ``** Check if the current Sentinel configuraiton is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. This command should be used in monitoring systems to check if a Sentinel deployment is ok. +* **SENTINEL ckquorum ``** Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover. This command should be used in monitoring systems to check if a Sentinel deployment is ok. * **SENTINEL flushconfig** Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. Normally Sentinel rewrites the configuration every time something changes in its state (in the context of the subset of the state which is persisted on disk across restart). However sometimes it is possible that the configuration file is lost because of operation errors, disk failures, package upgrade scripts or configuration managers. In those cases a way to to force Sentinel to rewrite the configuration file is handy. This command works even if the previous configuration file is completely missing. Reconfiguring Sentinel at Runtime @@ -739,7 +739,7 @@ and Sentinel uses it in order to pick a slave among the ones that can be used in order to failover a master: 1. If the slave priority is set to 0, the slave is never promoted to master. -2. Slaves with a *lower* priority number are prefredded by Sentinel. +2. Slaves with a *lower* priority number are preferred by Sentinel. 
For example if there is a slave S1 in the same data center of the current master, and another slave S2 in another data center, it is possible to set From bfe9101eee4f806bbf95d679fb46215210ead5e2 Mon Sep 17 00:00:00 2001 From: lord63 Date: Sat, 6 Jun 2015 16:59:38 +0800 Subject: [PATCH 0298/2314] Fix the command format in data-types-intro.md --- topics/data-types-intro.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 968b3a0d3a..9133d8987b 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -1011,11 +1011,10 @@ same: * Every time you see a new element, you add it to the count with `PFADD`. * Every time you want to retrieve the current approximation of the unique elements *added* with `PFADD` so far, you use the `PFCOUNT`. - - > pfadd hll a b c d - (integer) 1 - > pfcount hll - (integer) 4 + > pfadd hll a b c d + (integer) 1 + > pfcount hll + (integer) 4 An example of use case for this data structure is counting unique queries performed by users in a search form every day. From aef29b57fa05498ada1f09ad0a7e003329fefce6 Mon Sep 17 00:00:00 2001 From: Mike Marcacci Date: Sat, 6 Jun 2015 21:41:10 -0700 Subject: [PATCH 0299/2314] Added nodejs implementation to list https://github.com/mike-marcacci/node-redlock --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index 86d9be6675..935bf55feb 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -31,6 +31,7 @@ already available, that can be used as a reference. * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). * [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (Cpp implementation). * [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). 
+* [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. Safety and Liveness guarantees --- From 30d4b6129658dd96c57b8e0fbb1652d1475ff87c Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 9 Jun 2015 10:00:42 +0200 Subject: [PATCH 0300/2314] Now that we've got Redis Cluster, Twemproxy is not necessary anymore --- topics/partitioning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index 4b7c408e42..352e7af5bd 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -107,7 +107,7 @@ Twemproxy supports automatic partitioning among multiple Redis instances, with o It is *not* a single point of failure since you can start multiple proxies and instruct your clients to connect to the first that accepts the connection. -Basically Twemproxy is an intermediate layer between clients and Redis instances, that will reliably handle partitioning for us with minimal additional complexities. Currently it is the **suggested way to handle partitioning with Redis**. +Basically Twemproxy is an intermediate layer between clients and Redis instances, that will reliably handle partitioning for us with minimal additional complexities. You can read more about Twemproxy [in this antirez blog post](http://antirez.com/news/44). From 45a9da75a6abf1f94eb624a63606b2df34afea0c Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Tue, 9 Jun 2015 12:45:02 -0300 Subject: [PATCH 0301/2314] Tidy up clients. 
--- clients.json | 101 +++++++++++++++++++++++------------------------ makefile | 4 ++ utils/clients.rb | 79 ++++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 51 deletions(-) create mode 100644 makefile create mode 100644 utils/clients.rb diff --git a/clients.json b/clients.json index 42e8c275a4..c825ade1f0 100644 --- a/clients.json +++ b/clients.json @@ -2,7 +2,6 @@ { "name": "redis-rb", "language": "Ruby", - "url": "http://redis-rb.keyvalue.org", "repository": "https://github.com/redis/redis-rb", "description": "Very stable and mature client. Install and require the hiredis gem before redis-rb for maximum performances.", "authors": ["ezmobius", "soveran", "djanowski", "pnoordhuis"], @@ -24,7 +23,7 @@ "language": "Clojure", "repository": "https://github.com/ztellman/aleph", "description": "Redis client build on top of lamina", - "authors":["Zach Tellman"], + "authors": ["ztellman"], "active": true }, { @@ -60,7 +59,7 @@ "language": "Erlang", "repository": "https://github.com/jeremyong/sharded_eredis", "description": "Wrapper around eredis providing process pools and consistent hashing.", - "authors": ["jeremyong", "hiroeorz"], + "authors": ["jeremyong"], "active": true }, @@ -134,7 +133,7 @@ "language": "Go", "repository": "https://github.com/simonz05/godis", "description": "A Redis client for Go.", - "authors": ["simonz05"], + "authors": [], "active": true }, @@ -147,7 +146,7 @@ "authors": ["xiam"], "active": true }, - + { "name": "goredis", "language": "Go", @@ -165,7 +164,7 @@ "authors": ["keimoon"], "active": true }, - + { "name": "shipwire/redis", "language": "Go", @@ -189,8 +188,8 @@ { "name": "haskell-redis", "language": "Haskell", - "url": "http://bitbucket.org/videlalvaro/redis-haskell/wiki/Home", - "repository": "http://bitbucket.org/videlalvaro/redis-haskell/src", + "url": "https://bitbucket.org/videlalvaro/redis-haskell/wiki/Home", + "repository": 
"https://bitbucket.org/videlalvaro/redis-haskell/src", "description": "Not actively maintained, supports Redis <= 2.0.", "authors": ["old_sound"] }, @@ -218,7 +217,7 @@ { "name": "JRedis", "language": "Java", - "url": "http://code.google.com/p/jredis", + "url": "https://code.google.com/p/jredis/", "repository": "https://github.com/alphazero/jredis", "description": "", "authors": ["SunOf27"], @@ -228,8 +227,8 @@ { "name": "JDBC-Redis", "language": "Java", - "url": "http://code.google.com/p/jdbc-redis", - "repository": "http://code.google.com/p/jdbc-redis/source/browse", + "url": "https://code.google.com/p/jdbc-redis/", + "repository": "https://code.google.com/p/jdbc-redis/source/browse", "description": "", "authors": ["mavcunha"] }, @@ -289,8 +288,8 @@ { "name": "Redis", "language": "Perl", - "url": "http://search.cpan.org/dist/Redis", - "repository": "https://github.com/melo/perl-redis", + "url": "http://search.cpan.org/dist/Redis/", + "repository": "https://github.com/PerlRedis/perl-redis", "description": "Perl binding for Redis database", "authors": ["pedromelo"], "recommended": true, @@ -300,7 +299,7 @@ { "name": "RedisDB", "language": "Perl", - "url": "http://search.cpan.org/dist/RedisDB", + "url": "http://search.cpan.org/dist/RedisDB/", "repository": "https://github.com/trinitum/RedisDB", "description": "Perl binding for Redis database with fast XS-based protocolparser", "authors": ["trinitum"], @@ -319,7 +318,7 @@ { "name": "AnyEvent::Redis", "language": "Perl", - "url": "http://search.cpan.org/dist/AnyEvent-Redis", + "url": "http://search.cpan.org/dist/AnyEvent-Redis/", "repository": "https://github.com/miyagawa/AnyEvent-Redis", "description": "Non-blocking Redis client", "authors": ["miyagawa"] @@ -328,7 +327,7 @@ { "name": "AnyEvent::Redis::RipeRedis", "language": "Perl", - "url": "http://search.cpan.org/dist/AnyEvent-Redis-RipeRedis", + "url": 
"http://search.cpan.org/dist/AnyEvent-Redis-RipeRedis/", "repository": "https://github.com/iph0/AnyEvent-Redis-RipeRedis", "description": "Flexible non-blocking Redis client with reconnect feature", "authors": ["iph"], @@ -338,7 +337,7 @@ { "name": "AnyEvent::Hiredis", "language": "Perl", - "url": "http://search.cpan.org/dist/AnyEvent-Hiredis", + "url": "http://search.cpan.org/dist/AnyEvent-Hiredis/", "repository": "https://github.com/wjackson/AnyEvent-Hiredis", "description": "Non-blocking client using the hiredis C library", "authors": [], @@ -348,7 +347,7 @@ { "name": "Mojo::Redis", "language": "Perl", - "url": "http://search.cpan.org/dist/Mojo-Redis", + "url": "http://search.cpan.org/dist/Mojo-Redis/", "repository": "https://github.com/marcusramberg/mojo-redis", "description": "asynchronous Redis client for Mojolicious", "authors": ["und3f", "marcusramberg", "jhthorsen"], @@ -358,7 +357,7 @@ { "name": "Danga::Socket::Redis", "language": "Perl", - "url": "http://search.cpan.org/dist/Danga-Socket-Redis", + "url": "http://search.cpan.org/dist/Danga-Socket-Redis/", "description": "An asynchronous redis client using the Danga::Socket async library", "authors": ["martinredmond"] }, @@ -417,7 +416,7 @@ "description": "Lightweight, standalone, unit-tested fork of Redisent which wraps phpredis for best performance if available.", "authors": ["colinmollenhour"] }, - + { "name": "phpish/redis", "language": "PHP", @@ -425,16 +424,16 @@ "description": "Simple Redis client in PHP", "authors": ["sandeepshetty"] }, - + { "name": "PHP Sentinel Client", "language": "PHP", - "repository": "https://github.com/Sparkcentral/php-redis-sentinel", + "repository": "https://github.com/Sparkcentral/PSRedis", "description": "A PHP sentinel client acting as an extension to your regular redis client", "authors": ["jamescauwelier"], "active": true }, - + { "name": "redis-async", "language": "PHP", @@ -465,7 +464,7 @@ { 
"name": "txredis", "language": "Python", - "url": "http://pypi.python.org/pypi/txredis/0.1.1", + "url": "https://pypi.python.org/pypi/txredis", "description": "", "authors": ["dio_rian"] }, @@ -504,7 +503,7 @@ "description": "Redis network scheme for Rebol 3", "authors": ["rebolek"] }, - + { "name": "scala-redis", "language": "Scala", @@ -528,7 +527,7 @@ "language": "Scala", "repository": "https://github.com/andreyk0/redis-client-scala-netty", "description": "", - "authors": [""] + "authors": [] }, { "name": "sedis", @@ -555,7 +554,7 @@ "authors": ["livestream"], "active": true }, - + { "name": "rediscala", "language": "Scala", @@ -604,7 +603,7 @@ { "name": "Sider", "language": "C#", - "url": "http://nuget.org/List/Packages/Sider", + "url": "http://www.nuget.org/packages/Sider", "description": "Minimalistic client for C#/.NET 4.0", "authors": ["chakrit"] }, @@ -622,8 +621,8 @@ { "name": "hxneko-redis", "language": "haXe", - "url": "http://code.google.com/p/hxneko-redis", - "repository": "http://code.google.com/p/hxneko-redis/source/browse", + "url": "https://code.google.com/p/hxneko-redis/", + "repository": "https://code.google.com/p/hxneko-redis/source/browse", "description": "", "authors": [] }, @@ -657,9 +656,9 @@ { "name": "credis", "language": "C", - "repository": "http://code.google.com/p/credis/source/browse", + "repository": "https://code.google.com/p/credis/source/browse", "description": "", - "authors": [""], + "authors": [], "active": true }, @@ -695,7 +694,7 @@ { "name": "then-redis", "language": "Node.js", - "repository": "https://github.com/mjijackson/then-redis", + "repository": "https://github.com/mjackson/then-redis", "description": "A small, promise-based Redis client for node", "authors": ["mjackson"], "active": true @@ -706,7 +705,7 @@ "language": "Node.js", "repository": "https://github.com/fictorial/redis-node-client", "description": "No longer maintained, does not work with node 0.3.", - 
"authors": ["fictorial"] + "authors": [] }, { @@ -793,7 +792,7 @@ "authors": ["hmartiros"], "active": true }, - + { "name": "redis3m", "language": "C++", @@ -821,7 +820,7 @@ { "name": "eredis", "language": "emacs lisp", - "repository": "http://code.google.com/p/eredis", + "repository": "https://code.google.com/p/eredis/", "description": "Full Redis API plus ways to pull Redis data into an org-mode table and push it back when edited", "authors": ["justinhj"] }, @@ -829,7 +828,7 @@ { "name": "Tiny Redis", "language": "D", - "url": "http://adilbaig.github.com/Tiny-Redis/", + "url": "http://adilbaig.github.io/Tiny-Redis/", "repository": "https://github.com/adilbaig/Tiny-Redis", "description": "A Redis client for D2. Supports pipelining, transactions and Lua scripting", "authors": ["aidezigns"] @@ -878,7 +877,7 @@ "active": true, "recommended": true }, - + { "name": "rust-redis", "language": "Rust", @@ -905,7 +904,7 @@ "authors": ["seancharles"], "active": true }, - + { "name": "SimpleRedisClient", "language": "C++", @@ -914,26 +913,26 @@ "authors": ["Levhav"], "active": true }, - + { "name": "RedisClient", "language": "Java", "repository": "https://github.com/caoxinyu/RedisClient", "description": "redis client GUI tool", - "authors": ["Cao XinYu"], + "authors": [], "active": true }, - + { "name": "redis", - "language": "Nimrod", - "repository": "https://github.com/Araq/Nimrod", - "url": "http://nimrod-lang.org/redis.html", - "description": "Redis client for Nimrod", + "language": "Nim", + "repository": "https://github.com/Araq/Nim", + "url": "http://nim-lang.org/docs/redis.html", + "description": "Redis client for Nim", "authors": [], "active": true }, - + { "name": "libvmod-redis", "language": "VCL", @@ -942,7 +941,7 @@ "authors": ["carlosabalde"], "active": true }, - + { "name": "redisclient", "language": "C++", @@ -955,8 +954,8 @@ { "name": "redis-octave", "language": "Matlab", - "repository": 
"https://github.com/markuman/redis-octave", - "description": "A Redis client in pure Octave ", + "repository": "https://github.com/markuman/go-redis", + "description": "A Redis client in pure Octave", "authors": ["markuman"] }, @@ -973,7 +972,7 @@ "language": "Dart", "url": "https://github.com/ra1u/redis-dart", "description": "Simple and fast client", - "authors": ["Luka Rahne"], + "authors": [], "active": true }, diff --git a/makefile b/makefile new file mode 100644 index 0000000000..5f1697004d --- /dev/null +++ b/makefile @@ -0,0 +1,4 @@ +clients: .PHONY + ruby -rjson -r./utils/clients -e 'Clients.check(JSON.parse(File.read("clients.json"), symbolize_names: true))' + +.PHONY: diff --git a/utils/clients.rb b/utils/clients.rb new file mode 100644 index 0000000000..2a352615f9 --- /dev/null +++ b/utils/clients.rb @@ -0,0 +1,79 @@ +require "net/http" +require "uri" + +module Clients + def self.check(clients) + errors = [] + + workers = clients.map do |client| + Thread.new do + Thread.current.abort_on_exception = true + + Checker.new(client, errors).check + end + end + + workers.each(&:join) + + if errors.empty? 
+ puts("All is good.") + else + errors.group_by(&:first).each do |client, group| + puts(client[:name]) + + group.each do |_, message| + puts(sprintf(" %s", message)) + end + end + + exit(1) + end + end + + class Checker + def initialize(client, errors) + @client = client + @errors = errors + end + + def check + if @client[:url] + check_url(@client[:url]) + end + + if @client[:repository] + check_url(@client[:repository]) + end + + Array(@client[:authors]).each do |author| + check_author(author) + end + end + + def check_url(url) + uri = URI(url) + + if uri.scheme == "http" || uri.scheme == "https" + res = Net::HTTP.get_response(uri) + + assert(res.code == "200" || res.code == "302", sprintf("URL broken: %s (%s)", url, res.code)) + end + end + + def check_author(name) + valid = assert(name && name.size > 0 && name =~ /^\w+$/, sprintf("Invalid author %s", name.inspect)) + + if valid + uri = URI(sprintf("http://twitter-avatars.herokuapp.com/avatar/%s", name)) + + res = Net::HTTP.get_response(uri) + + assert(res.code == "302", sprintf("Invalid Twitter account: %s", name)) + end + end + + def assert(assertion, message) + assertion || (@errors.push([@client, message]) && false) + end + end +end From 2beaf18dd8cff44a17a4d1396248c1a746d80aca Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 10 Jun 2015 16:53:53 +0200 Subject: [PATCH 0302/2314] It's actually keys from all databases --- commands/flushall.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/flushall.md b/commands/flushall.md index 0fa4219dba..be1f717eba 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -3,7 +3,7 @@ selected one. This command never fails. The time-complexity for this operation is O(N), N being the number of -keys in the database. +keys in all existing databases. 
@return From 0755ef6b1a022c9eb8f0ef9d7ee440bcb94003ec Mon Sep 17 00:00:00 2001 From: Sebastian Waisbrot Date: Sat, 13 Jun 2015 12:11:26 -0700 Subject: [PATCH 0303/2314] ZADD allows multiple flags Current documentation states that ZADD takes NX|XX|CH|INCR, but those flags can be composed. --- commands.json | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index 79d297a56a..a306c13b8b 100644 --- a/commands.json +++ b/commands.json @@ -2300,9 +2300,21 @@ "type": "key" }, { - "name": "options", + "name": "options1", + "type": "enum", + "enum": ["NX","XX"], + "optional": true + }, + { + "name": "options2", + "type": "enum", + "enum": ["CH"], + "optional": true + }, + { + "name": "options3", "type": "enum", - "enum": ["NX","XX","CH","INCR"], + "enum": ["INCR"], "optional": true }, { From adba9cbfdb7e9048b95a3c37d93dd486ae83fb8e Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sat, 13 Jun 2015 21:27:40 +0200 Subject: [PATCH 0304/2314] Give proper option names --- commands.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/commands.json b/commands.json index a306c13b8b..b8a4e0fffb 100644 --- a/commands.json +++ b/commands.json @@ -2300,19 +2300,19 @@ "type": "key" }, { - "name": "options1", + "name": "condition", "type": "enum", "enum": ["NX","XX"], "optional": true }, { - "name": "options2", + "name": "change", "type": "enum", "enum": ["CH"], "optional": true }, { - "name": "options3", + "name": "increment", "type": "enum", "enum": ["INCR"], "optional": true From cf2fbcaebe6b498ad9b142a99488d442a46ecd19 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 16 Jun 2015 23:36:33 +0300 Subject: [PATCH 0305/2314] Cluster is no longer vaporware --- commands/eval.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 3bb64869fc..d0a74bf511 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -62,9 +62,7 @@ OK All Redis 
commands must be analyzed before execution to determine which keys the command will operate on. In order for this to be true for `EVAL`, keys must be passed explicitly. This is useful in many ways, but especially to make sure Redis Cluster -can forward your request to the appropriate cluster node (Redis Cluster -is a work in progress, but the scripting feature was designed in order to play -well with it). +can forward your request to the appropriate cluster node. Note this rule is not enforced in order to provide the user with opportunities to abuse the Redis single instance configuration, at the cost of From 492e417a73faed4ff5f3540918880565538771d9 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 17 Jun 2015 01:27:13 +0300 Subject: [PATCH 0306/2314] "Escaped" KEYS + formatted KEYS Per @badboy_'s recommendation --- commands/eval.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index d0a74bf511..3a2e71a7fa 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -9,7 +9,7 @@ It is just a Lua program that will run in the context of the Redis server. The second argument of `EVAL` is the number of arguments that follows the script (starting from the third argument) that represent Redis key names. -This arguments can be accessed by Lua using the `KEYS` global variable in the +This arguments can be accessed by Lua using the !`KEYS` global variable in the form of a one-based array (so `KEYS[1]`, `KEYS[2]`, ...). All the additional arguments should not represent key names and can be accessed @@ -52,7 +52,7 @@ OK The above script sets the key `foo` to the string `bar`. 
However it violates the `EVAL` command semantics as all the keys that the script -uses should be passed using the KEYS array: +uses should be passed using the !`KEYS` array: ``` > eval "return redis.call('set',KEYS[1],'bar')" 1 foo From e59916b3e25585b65893ec6b9ff2121905df72a3 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 17 Jun 2015 14:12:07 +0300 Subject: [PATCH 0307/2314] Properly escaped `KEYS` --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 3a2e71a7fa..8d7fb820c1 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -9,7 +9,7 @@ It is just a Lua program that will run in the context of the Redis server. The second argument of `EVAL` is the number of arguments that follows the script (starting from the third argument) that represent Redis key names. -This arguments can be accessed by Lua using the !`KEYS` global variable in the +This arguments can be accessed by Lua using the `!KEYS` global variable in the form of a one-based array (so `KEYS[1]`, `KEYS[2]`, ...). All the additional arguments should not represent key names and can be accessed From 4f335ce727e53ffa5a6b338d42091ee1f7c9701b Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 18 Jun 2015 00:33:27 +0300 Subject: [PATCH 0308/2314] Last time touching `KEYS` I'm such an lousy dev that even my docs don't compile --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 8d7fb820c1..8e9b9953a0 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -52,7 +52,7 @@ OK The above script sets the key `foo` to the string `bar`. 
However it violates the `EVAL` command semantics as all the keys that the script -uses should be passed using the !`KEYS` array: +uses should be passed using the `!KEYS` array: ``` > eval "return redis.call('set',KEYS[1],'bar')" 1 foo From b6402360ae63e53501e3a9ff77220abda3b85ea7 Mon Sep 17 00:00:00 2001 From: Adriano Di Giovanni Date: Mon, 22 Jun 2015 09:35:42 +0200 Subject: [PATCH 0309/2314] add entry to tools.json --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index 4640a9d23d..65f9371821 100644 --- a/tools.json +++ b/tools.json @@ -336,5 +336,13 @@ "repository": "https://github.com/josiahcarlson/rom", "description": "Redis object mapper for Python using declarative models, with search over numeric, full text, prefix, and suffix indexes", "authors": ["josiahcarlson"] + }, + { + "name": "RedisKeychain", + "language": "Javascript", + "url": "https://github.com/adriano-di-giovanni/node-redis-keychain", + "repository": "https://github.com/adriano-di-giovanni/node-redis-keychain", + "description": "A Node.js library for streamlining the configuration and maintenance of your Redis namespace", + "authors": ["codecreativity"] } ] From a60751412b24858afeb7db40fe0cdefb906dd691 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 24 Jun 2015 10:15:05 +0200 Subject: [PATCH 0310/2314] Better Docker info in Sentinel doc thanks to Sam Saffron. 
--- topics/sentinel.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index be8a3f7499..4f3e737ad1 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -402,17 +402,19 @@ Since Sentinels auto detect slaves using masters `INFO` output information, the detected slaves will not be reachable, and Sentinel will never be able to failover the master, since there are no good slaves from the point of view of the system, so there is currently no way to monitor with Sentinel a set of -master and slave instances deployed with Docker, unless you instruct Docker -to map the port 1:1. +master and slave instances deployed with Docker, **unless you instruct Docker +to map the port 1:1**. -For the first problem instead, in case you want to run a set of Sentinel -instances using Docker, you can use the following two Sentinel configuration -directives in order to force Sentinel to announce a specific set of IP -and port: +For the first problem, in case you want to run a set of Sentinel +instances using Docker with forwarded ports (or any other NAT setup where ports +are remapped), you can use the following two Sentinel configuration directives +in order to force Sentinel to announce a specific set of IP and port: sentinel announce-ip sentinel announce-port +Note that Docker has the ability to run in *host networking mode* (check the `--net=host` option for more information). This should create no issues since ports are not remapped in this setup. 
+ A quick tutorial === From 9048615f66bb136efc6793ab2700808cbc7190cd Mon Sep 17 00:00:00 2001 From: Drew LeSueur Date: Wed, 24 Jun 2015 08:20:22 -0700 Subject: [PATCH 0311/2314] change REDIS_PORT to REDISPORT --- topics/quickstart.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/quickstart.md b/topics/quickstart.md index 6c09719db4..f52eefecc1 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -152,7 +152,7 @@ We assume you already copied **redis-server** and **redis-cli** executables unde sudo vi /etc/init.d/redis_6379 -Make sure to modify **REDIS_PORT** accordingly to the port you are using. +Make sure to modify **REDISPORT** accordingly to the port you are using. Both the pid file path and the configuration file name depend on the port number. * Copy the template configuration file you'll find in the root directory of the Redis distribution into /etc/redis/ using the port number as name, for instance: From 16741cb9a65951ac4c25be00014ddba8f7abd9ed Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 8 Jul 2015 14:48:47 +0200 Subject: [PATCH 0312/2314] GEOADD, GEORADIUS, GEORADIUSBYMEMBER documented. 
--- commands.json | 109 +++++++++++++++++++++++++++++++++- commands/geoadd.md | 52 ++++++++++++++++ commands/georadius.md | 49 +++++++++++++++ commands/georadiusbymember.md | 14 +++++ 4 files changed, 223 insertions(+), 1 deletion(-) create mode 100644 commands/geoadd.md create mode 100644 commands/georadius.md create mode 100644 commands/georadiusbymember.md diff --git a/commands.json b/commands.json index b8a4e0fffb..db418e7667 100644 --- a/commands.json +++ b/commands.json @@ -712,6 +712,113 @@ "since": "1.0.0", "group": "server" }, + "GEOADD": { + "summary": "Add one or more geospatial items in the geospatial index represented using a sorted set", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": ["longitude", "latitude", "member"], + "type": ["double", "double", "string"], + "multiple": true + } + ], + "since": "3.2.0", + "group": "geo" + }, + "GEORADIUS": { + "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": ["m", "km", "ft", "mi"] + }, + { + "name": "withcoord", + "type": "enum", + "enum": ["WITHCOORD"], + "optional": true + }, + { + "name": "withdist", + "type": "enum", + "enum": ["WITHDIST"], + "optional": true + }, + { + "name": "withhash", + "type": "enum", + "enum": ["WITHHASH"], + "optional": true + } + ], + "since": "3.2.0", + "group": "geo" + }, + "GEORADIUSBYMEMBER": { + "summary": "Query a sorted set 
representing a geospatial index to fetch members matching a given maximum distance from a point. The center coordinates are provided in terms of an existing member name", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "member", + "type": "string" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "enum", + "enum": ["m", "km", "ft", "mi"] + }, + { + "name": "withcoord", + "type": "enum", + "enum": ["WITHCOORD"], + "optional": true + }, + { + "name": "withdist", + "type": "enum", + "enum": ["WITHDIST"], + "optional": true + }, + { + "name": "withhash", + "type": "enum", + "enum": ["WITHHASH"], + "optional": true + } + ], + "since": "3.2.0", + "group": "geo" + }, "GET": { "summary": "Get the value of a key", "complexity": "O(1)", @@ -2293,7 +2400,7 @@ }, "ZADD": { "summary": "Add one or more members to a sorted set, or update its score if it already exists", - "complexity": "O(log(N)) where N is the number of elements in the sorted set.", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", "arguments": [ { "name": "key", diff --git a/commands/geoadd.md b/commands/geoadd.md new file mode 100644 index 0000000000..58051986e8 --- /dev/null +++ b/commands/geoadd.md @@ -0,0 +1,52 @@ +Adds the specified geospatial items (latitude, logitude, name) to the specified +key. Data is stored into the key as a sorted set, in a way that makes it possible to later retrieve items using a query by radius with the `GEORADIUS` or +`GEOREDIUSBYMEMBER` commands. + +The command takes arguments in the standard format x,y so the longitude must +be specified before the latitude. There are limits to the coordinates that +can be indexed: areas very near to the poles are not indexable. 
The exact +limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: + +* Valid latitudes are from -180 to 180 degrees. +* Valid latitudes are from -85.05112878 to 85.05112878 degreees. + +The command will report an error when the user attempts to index coordinates outside the specified ranges. + +How it works? +--- + +The way the sorted set is populated is using a technique called +[Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude +bits are interleaved in order to form an unique 52 bit integer. We know +that a sorted set double score can represent a 52 bit integer without losing +precision. + +This format allows for radius querying by checking the 1+8 areas needed +to cover the whole radius, and discarding elements outside the radious. +The areas are checked by calculating the range of the box covered removing +enough bits from the less significant part of the sorted set score, and +computing the score range to query in the sorted set for each area. + +What Earth model it uses? +--- + +It just assumes that the Earth is a sphere, since the used distance forumla +is the Haversine formula. This formula is only an approximation when applied to the Earth, which is not a perfect sphere. The introduced errors are not an issue when used in the context of social network sites that need to query by radius +and most other applications. However in th worst case the error may be up to +0.5%, so you may want to consider other systems for error-critical applications. + +@return + +@integer-reply, specifically: + +* The number of elements added to the sorted sets, not including elements + already existing for which the score was updated. 
+ +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEODIST Sicily Palermo Catania +GEORADIUS Sicily 15 37 100 km +GEORADIUS Sicily 15 37 200 km +``` diff --git a/commands/georadius.md b/commands/georadius.md new file mode 100644 index 0000000000..329bfaa28e --- /dev/null +++ b/commands/georadius.md @@ -0,0 +1,49 @@ +Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). + +The common use case for this command is to retrieve geospatial items near ot a specified point and no far than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an applicaiton nearby places. + +The radius is specified in one of the following units: + +* **m** for meters. +* **km** for kilometers. +* **mi** for miles. +* **ft** for feet. + +The command optionally returns additional information using the following options: + +* **WITHDIST**: Also return the distance of the returned items from the specified center. The distance is returned in the same unit as the unit specified as the radius argument of the command. +* **WITHCOORD**: Also return the longitude,latitude coordinates of the matching items. +* **WITHASH**: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. + +The command default is to return unsorted items. Two different sorting methods can be invoked using the following two options: + +* **ASC**: Sort returned items from the nearest to the fairest, relative to the center. +* **DESC**: Sort returned items from the fairest to the nearest, relative to the center. + +By default all the matching items are returned. 
It is possible to limit the results to the first N matching items by using the **COUNT ``** option. However note that internally the command needs to perform an effort proportional to the number of items matching the specified area, so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. On the other hand `COUNT` can be a very effective way to reduce bandwidth usage if normally just the first results are used. + +@return + +@array-reply, specifically: + +* Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. +* If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. + +When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other informations are returned in the following order as successive elements of the sub-array. + +1. The distance from the center as a floating point number, in the same unit specified in the radius. +2. The geohash integer. +3. The coordinates as a two items x,y array (longitude,latitude). 
+ +So for example the command `GEORADIUS Sicily 15 37 200 km withcoord withdist` will return each item in the following way: + + ["Palermo","190.4424",["13.361389338970184","38.115556395496299"]] + +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEORADIUS Sicily 15 37 200 km WITHDIST +GEORADIUS Sicily 15 37 200 km WITHCOORD +GEORADIUS Sicily 15 37 200 km WITHDIST WITHCOORD +``` diff --git a/commands/georadiusbymember.md b/commands/georadiusbymember.md new file mode 100644 index 0000000000..8502e3156c --- /dev/null +++ b/commands/georadiusbymember.md @@ -0,0 +1,14 @@ +This command is exactly like `GEORADIUS` with the sole difference that instead +of taking, as the center of the area to query, a longitude and latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. + +The position of the specified member is used as the center of the query. + +Please check the example below and the `GEORADIUS` documentation for more information about the command and its options. + +@examples + +```cli +GEOADD Sicily 13.583333 37.316667 "Agrigento" +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEORADIUSBYMEMBER Sicily Agrigento 100 km +``` From 085500975a77ff5b760a0247480123b52557496c Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 8 Jul 2015 15:53:04 +0200 Subject: [PATCH 0313/2314] GEODECODE documented. 
--- commands.json | 14 +++++++++++++- commands/geodecode.md | 28 ++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 commands/geodecode.md diff --git a/commands.json b/commands.json index db418e7667..0c881b32c0 100644 --- a/commands.json +++ b/commands.json @@ -729,6 +729,18 @@ "since": "3.2.0", "group": "geo" }, + "GEODECODE": { + "summary": "Decode a 52 bit geohash integer into a longitude, latitude pair", + "complexity": "O(1).", + "arguments": [ + { + "name": "integer", + "type": "integer" + } + ], + "since": "3.2.0", + "group": "geo" + }, "GEORADIUS": { "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", @@ -777,7 +789,7 @@ "group": "geo" }, "GEORADIUSBYMEMBER": { - "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point. The center coordinates are provided in terms of an existing member name", + "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", "arguments": [ { diff --git a/commands/geodecode.md b/commands/geodecode.md new file mode 100644 index 0000000000..b3e46c2bf2 --- /dev/null +++ b/commands/geodecode.md @@ -0,0 +1,28 @@ +Geospatial Redis commands encode positions of objects in a single 52 bit integer, using a technique called geohash. Those 52 bit integers are: + +1. Returned by `GEOAENCODE` as return value. +2. Used by `GEOADD` as sorted set scores of members. 
+ +The `GEODECODE` command is able to translate the 52 bit integers back into a position expressed as longitude and latitude. The command also returns the corners of the box that the 52 bit integer identifies on the earth surface, since each 52 integer actually represent not a single point, but a small area. + +This command usefulness is limited to the rare situations where you want to +fetch raw data from the sorted set, for example with `ZRANGE`, and later +need to decode the scores into positions. The other obvious use is debugging. + +@return + +@array-reply, specifically: + +The command returns an array of three elements. Each element of the main array is an array of two elements, specifying a longitude and a latitude. So the returned value is in the following form: + +* center-longitude, center-latitude +* min-longitude, min-latitude +* max-longitude, max-latitude + +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +ZSCORE Sicily "Palermo" +DECODE 3479099956230698 +``` From a1e61db73f7581b2ba76f5563ccb1bba142cc0ea Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 8 Jul 2015 18:07:05 +0200 Subject: [PATCH 0314/2314] GEOENCODE documented. 
--- commands.json | 27 +++++++++++++++++++++++++++ commands/geodecode.md | 2 +- commands/geoencode.md | 37 +++++++++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 commands/geoencode.md diff --git a/commands.json b/commands.json index 0c881b32c0..0a3eac5443 100644 --- a/commands.json +++ b/commands.json @@ -741,6 +741,33 @@ "since": "3.2.0", "group": "geo" }, + "GEOENCODE": { + "summary": "Encode a longitude and latitude pair into a 52 bit geohash integer", + "complexity": "O(1).", + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "radius", + "type": "double", + "optional": true + }, + { + "name": "unit", + "type": "enum", + "enum": ["m", "km", "ft", "mi"] + "optional": true + } + ], + "since": "3.2.0", + "group": "geo" + }, "GEORADIUS": { "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", diff --git a/commands/geodecode.md b/commands/geodecode.md index b3e46c2bf2..386b97da29 100644 --- a/commands/geodecode.md +++ b/commands/geodecode.md @@ -24,5 +24,5 @@ The command returns an array of three elements. Each element of the main array i ```cli GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" ZSCORE Sicily "Palermo" -DECODE 3479099956230698 +GEODECODE 3479099956230698 ``` diff --git a/commands/geoencode.md b/commands/geoencode.md new file mode 100644 index 0000000000..a22117204c --- /dev/null +++ b/commands/geoencode.md @@ -0,0 +1,37 @@ +Geospatial Redis commands encode positions of objects in a single 52 bit integer, using a technique called geohash. The encoding is further explained in the `GEODECODE` and `GEOADD` documentation. 
The `GEOENCODE` command, documented in this page, is able to convert a longitude and latitude pair into such 52 bit integer, which is used as the *score* for the sorted set members representing geopositional information.
+
+Normally you don't need to use this command, unless you plan to implement low level code in the client side interacting with the Redis geo commands. This command may also be useful for debugging purposes.
+
+`GEOENCODE` takes as input:
+
+1. The longitude and latitude of a point on the Earth surface.
+2. Optionally a radius represented by an integer and a unit.
+
+And returns a set of information, including the representation of the position as a 52 bit integer, the min and max corners of the bounding box represented by the geo hash, the center point in the area covered by the geohash integer, and finally the two sorted set scores to query in order to retrieve all the elements included in the geohash area.
+
+The radius optionally provided to the command is used in order to compute the two scores returned by the command for range query purposes.
+ +@return + +@array-reply, specifically: + +The command returns an array of give elements in the following order: + +* The 52 bit geohash +* min-longitude, min-latitude of the area identified +* max-longitude, max-latitude of the area identified +* center-longitude, center-latitude +* min-score and max-score of the sorted set to retrieve the members inside the area + +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +ZSCORE Sicily "Palermo" +GEOENCODE 13.361389 38.115556 100 km +``` From b74d814e1f942297605132eff034d43777466493 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 10:26:31 +0200 Subject: [PATCH 0315/2314] Fix commands.json missing comma. --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 0a3eac5443..ba45f1baa7 100644 --- a/commands.json +++ b/commands.json @@ -761,7 +761,7 @@ { "name": "unit", "type": "enum", - "enum": ["m", "km", "ft", "mi"] + "enum": ["m", "km", "ft", "mi"], "optional": true } ], From c2f7900509ef2b3fc46fdee66336137030654e29 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 10:46:40 +0200 Subject: [PATCH 0316/2314] GEOHASH command documented. 
--- commands.json | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/commands.json b/commands.json index ba45f1baa7..1419a22949 100644 --- a/commands.json +++ b/commands.json @@ -768,6 +768,23 @@ "since": "3.2.0", "group": "geo" }, + "GEOHASH": { + "summary": "Returns members of a geospatial index as standard geohash strings", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "since": "3.2.0", + "group": "geo" + }, "GEORADIUS": { "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", From 48250e9e3e38e1af91eacb03c6a679658dbd5737 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 11:04:08 +0200 Subject: [PATCH 0317/2314] Fix typo --- commands/geoadd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/geoadd.md b/commands/geoadd.md index 58051986e8..93f1950542 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -30,7 +30,7 @@ computing the score range to query in the sorted set for each area. What Earth model it uses? --- -It just assumes that the Earth is a sphere, since the used distance forumla +It just assumes that the Earth is a sphere, since the used distance formula is the Haversine formula. This formula is only an approximation when applied to the Earth, which is not a perfect sphere. The introduced errors are not an issue when used in the context of social network sites that need to query by radius and most other applications. 
However in th worst case the error may be up to 0.5%, so you may want to consider other systems for error-critical applications. From 4587dd85bdae8ac0cefed62b3ff72821ac114522 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 11:05:48 +0200 Subject: [PATCH 0318/2314] It's "poor man's multiplexing" --- topics/distlock.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/distlock.md b/topics/distlock.md index 935bf55feb..8d1b11da6a 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -159,7 +159,7 @@ Basically if there are infinite continuous network partitions, the system may be Performance, crash-recovery and fsync --- -Many users using Redis as a lock server need high performance in terms of both latency to acquire and release a lock, and number of acquire / release operations that it is possible to perform per second. In order to meet this requirement, the strategy to talk with the N Redis servers to reduce latency is definitely multiplexing (or poor’s man multiplexing, which is, putting the socket in non-blocking mode, send all the commands, and read all the commands later, assuming that the RTT between the client and each instance is similar). +Many users using Redis as a lock server need high performance in terms of both latency to acquire and release a lock, and number of acquire / release operations that it is possible to perform per second. In order to meet this requirement, the strategy to talk with the N Redis servers to reduce latency is definitely multiplexing (or poor man's multiplexing, which is, putting the socket in non-blocking mode, send all the commands, and read all the commands later, assuming that the RTT between the client and each instance is similar). However there is another consideration to do about persistence if we want to target a crash-recovery system model. 
From 5f9671cd21d53107ef165a0a7aedced7453f92f8 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 10:57:31 +0200 Subject: [PATCH 0319/2314] Force english language for spellchecking --- Rakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Rakefile b/Rakefile index 8f4bd30f6b..293a723b55 100644 --- a/Rakefile +++ b/Rakefile @@ -36,7 +36,7 @@ task :spellcheck do ruby -pe 'gsub /^ .*$/, ""' | ruby -pe 'gsub /`[^`]+`/, ""' | ruby -e 'puts $stdin.read.gsub(/\[([^\]]+)\]\(([^\)]+)\)/m, "\\1").gsub(/^```.*```/m, "")' | - aspell -H -a --extra-dicts=./tmp/dict 2>/dev/null + aspell --lang=en -H -a --extra-dicts=./tmp/dict 2>/dev/null } words = `cat '#{file}' | #{command}`.lines.map do |line| From 2ac8facb17062b2057c4f4086d969ca2a8115248 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 10:57:44 +0200 Subject: [PATCH 0320/2314] Ignore case for spellchecking --- Rakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Rakefile b/Rakefile index 293a723b55..2a14ab2cb9 100644 --- a/Rakefile +++ b/Rakefile @@ -36,7 +36,7 @@ task :spellcheck do ruby -pe 'gsub /^ .*$/, ""' | ruby -pe 'gsub /`[^`]+`/, ""' | ruby -e 'puts $stdin.read.gsub(/\[([^\]]+)\]\(([^\)]+)\)/m, "\\1").gsub(/^```.*```/m, "")' | - aspell --lang=en -H -a --extra-dicts=./tmp/dict 2>/dev/null + aspell --lang=en --ignore-case -H -a --extra-dicts=./tmp/dict 2>/dev/null } words = `cat '#{file}' | #{command}`.lines.map do |line| From 36bc1a7cc62cda02cb324bf35c31dcd506a7819b Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 10:59:32 +0200 Subject: [PATCH 0321/2314] Add whole list of missing words to the wordlist --- wordlist | 349 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 349 insertions(+) diff --git a/wordlist b/wordlist index 7ba32f391c..1877d4bca9 100644 --- a/wordlist +++ b/wordlist @@ -1,66 +1,415 @@ +ACLs +AMD AOF API +CAS CJSON CLI +CP +CPUs +CRC +CRDTs +CRLF +CSV +Changelog Ctrl 
+DIctionary +DLM +DNS +Diskless +EBS +EP +EPSG +Erlang +EventMachine +Facebook +Fsyncing +GBits +GCC +GDB +GETs GHz +GPG +Gbits +Geospatial +GitHub +Github +Google +HLL +HLLs +HOWTO +HVM +Haversine +HyperLogLog +HyperLogLogs +IEEE IP +IPC +IPv +IRC +Inline JPEG JSON +LF +LLOOGG +LRU +Linode +Liveness Lua +MERCHANTABILITY +MX MacBook +Macbook +Maxmemory +Memcached +MongoDB +MySQL +NAS +NFS +NIC +NICs +NOSAVE +NUMA +NX +Nehalem +Netflix +NoSQL +Noordhuis +ODOWN +OOM +OSGEO +Ok +Opteron +PHP +PINGs +POSIX PRNG +PSYNC +PostgreSQL +Presharding RDB +RDD +RDDs +REDISPORT +REdis +REmote +RSS +RTT +RaiserFS Redis +Redlock +Reshard +Retwis +Runtime +SDOWN SHA +SIGBUS +SIGFPE +SIGILL +SIGSEGV +SIGTERM +SLES +SMS SQL +SQLite +SSD +SSL +Sanfilippo +SmartOS +Solaris +SoundClound +SystemTimer +TCP +TLB +TODO +Twemproxy +Tx UTF +UseOptimization +VM +VMware +VPS +ValueN +Virtualized +Westmere +Wikipedia +Xen Xeon Yukihiro +addr +afterwards +allkeys allocator +antirez +aof +apache +appendfsync +applicaiton +arity atomicity +auth +backend backticks +backtrace +benchmarked +benchmarking +bgsave bitwise blazingly blog boolean +br +bt btw +cardinalities cardinality +cas +cb +cgroups +changelogs checksum +chrt +cli +commandstats +conf config +configEpoch +cpu +cron dataset datasets +de decrement decrementing +denyoom +deserialize deserializing destkey desync +desynchronize +dev +dir +diskless +dormando +earts +eg +else's endian +endianess +epoll +ethernet +everybody's +everysec +executables +facto +failback +failover +failovers +fanout +fdatasync +fea +fermurb +filesystem +firewalled +firewalling +flw +fo +freenode +fsync functionalities +gb +geo +geohash +geopositional +geospatial +github globals +gmail +hashs +hbgc +hewowu +hgcarr +hiredis hostname +htop +hyperloglogs +hypervisor +iamonds incrementing +indexable indices infeasible +informations +init +inline +internet +io +iojob +iostat +ip +ists +jemalloc +jpeg +js +kB +kb +keyN keyspace +killable +kqueue +latencies +len 
lexicographically +libc +liveness +lloogg +localhost +login +logitude +logout +lookup +lookups +loopback +lru +lubs +macroscopical +macroscopically +malloc +mem +memcached +metadata +mget +misconfigured +mmap +movablekeys multi +mutex +mylist +mymaster +myzset +netcat +netsplits +newjobs +noeviction +noscript +numactl +ok +online +ot +overcommit +pades +pageview pcall +pid pipelined pipelining +pippo +pmessage +poor +pre +probabilistically +proc +programmatically +prstat +pubsub +qsort +queueing +radious +rc +rdb +readonly +realtime +rebalance +rebalancing +reconfigures +reconnection +reconnections +recv +redhatvm +redirections +redis +redissrc +reimplement +representable +reprovisioned +reshard +resharding +reshardings +resync +resynchronization +resynchronize +robj +roundtrips +rss +runid +runtime scalable +se semantical +sharding +si +sismember +slowlog +smaps snapshotting +src startup +strace subcommand subcommands +suboptimal substring +swappability +syscall +taskset +tcmalloc +tdtl +tdts +tdtss +techstacks +th +timeline timestamp +tp +tpaof +tradeoff +tradeoffs +transactional +trib +tt +ttc +ttl tuple tuples unary +unencrypted +unguessable +unix unordered +unreachability unsubscribe unsubscribed unsubscribes +unsubscription +untrusted +untuned unwatch +unwatches +urandom +userid +username +usr +utf +variadic +versa +versioned +versioning +virtualization +virtualized +vm +vmstat +vtype +wikifs +wildcards +xff +ZPOP From 9f975184161914b14860b7916c1b40c34a573ff7 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 11:16:23 +0200 Subject: [PATCH 0322/2314] Remove the since attribute from Geo commands. They are yet not avaiable in a stable release. New redis.io versions will show a warning in such a case. 
--- commands.json | 6 ------ 1 file changed, 6 deletions(-) diff --git a/commands.json b/commands.json index 1419a22949..631912fb50 100644 --- a/commands.json +++ b/commands.json @@ -726,7 +726,6 @@ "multiple": true } ], - "since": "3.2.0", "group": "geo" }, "GEODECODE": { @@ -738,7 +737,6 @@ "type": "integer" } ], - "since": "3.2.0", "group": "geo" }, "GEOENCODE": { @@ -765,7 +763,6 @@ "optional": true } ], - "since": "3.2.0", "group": "geo" }, "GEOHASH": { @@ -782,7 +779,6 @@ "multiple": true } ], - "since": "3.2.0", "group": "geo" }, "GEORADIUS": { @@ -829,7 +825,6 @@ "optional": true } ], - "since": "3.2.0", "group": "geo" }, "GEORADIUSBYMEMBER": { @@ -872,7 +867,6 @@ "optional": true } ], - "since": "3.2.0", "group": "geo" }, "GET": { From 866617984a61c838402e408a957a149dcd61b7cb Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 11:34:21 +0200 Subject: [PATCH 0323/2314] GEOREDIUS -> GEORADIUS typo. --- commands/geoadd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/geoadd.md b/commands/geoadd.md index 93f1950542..68a66db899 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -1,6 +1,6 @@ Adds the specified geospatial items (latitude, logitude, name) to the specified key. Data is stored into the key as a sorted set, in a way that makes it possible to later retrieve items using a query by radius with the `GEORADIUS` or -`GEOREDIUSBYMEMBER` commands. +`GEORADIUSBYMEMBER` commands. The command takes arguments in the standard format x,y so the longitude must be specified before the latitude. There are limits to the coordinates that From a8d5423a906ffefef9a8f2c8824ab1b0dc6b323a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 11:36:02 +0200 Subject: [PATCH 0324/2314] Actually add GEOHASH markdown file. 
--- commands/geohash.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 commands/geohash.md diff --git a/commands/geohash.md b/commands/geohash.md new file mode 100644 index 0000000000..9ebe65395c --- /dev/null +++ b/commands/geohash.md @@ -0,0 +1,33 @@ +Return valid [Geohash](https://en.wikipedia.org/wiki/Geohash) strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using `GEOADD`). + +Normally Redis represents positions of elements using a variation of the Geohash +technique where positions are encoded using 52 bit integers. The encoding is +also different compared to the standard because the initial min and max +coordinates used during the encoding and decoding process are different. This +command however **returns a standard Geohash** in the form of a string as +described in the [Wikipedia article](https://en.wikipedia.org/wiki/Geohash) and compatible with the [geohash.org](http://geohash.org) web site. + +Geohash string properties +--- + +The command returns 11 characters Geohash strings, so no precision is loss +compared to the Redis internal 52 bit representation. The returned Geohashes +have the following properties: + +1. They can be shortened removing characters from the right. It will lose precision but will still point to the same area. +2. It is possible to use them in `geohash.org` URLs such as `http://geohash.org/`. This is an [example of such URL](http://geohash.org/sqdtr74hyu0). +3. Strings with a similar prefix are nearby, but the contrary is not true, it is possible that strings with different prefixes are nearby too. + +@return + +@array-reply, specifically: + +The command returns an array where each element is the Geohash corresponding to +each member name passed as arguments to the command. 
+ +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEOHASH Sicily Palermo Catania +``` From 35471383d97c0ce7e12edd65ea0be87cff26a45a Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 12:00:36 +0200 Subject: [PATCH 0325/2314] GEOPOS documented. --- commands.json | 18 +++++++++++++++++- commands/geohash.md | 2 +- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index 631912fb50..22df4964a7 100644 --- a/commands.json +++ b/commands.json @@ -767,7 +767,23 @@ }, "GEOHASH": { "summary": "Returns members of a geospatial index as standard geohash strings", - "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "group": "geo" + }, + "GEOPOS": { + "summary": "Returns longitude and latitude of members of a geospatial index", + "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", "arguments": [ { "name": "key", diff --git a/commands/geohash.md b/commands/geohash.md index 9ebe65395c..2517c3f28d 100644 --- a/commands/geohash.md +++ b/commands/geohash.md @@ -23,7 +23,7 @@ have the following properties: @array-reply, specifically: The command returns an array where each element is the Geohash corresponding to -each member name passed as arguments to the command. +each member name passed as argument to the command. @examples From 041a40f8b90b6ab1a9572a62dca84831a2e52048 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 12:06:29 +0200 Subject: [PATCH 0326/2314] GEODIST command documented. 
--- commands.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/commands.json b/commands.json index 22df4964a7..6060870562 100644 --- a/commands.json +++ b/commands.json @@ -797,6 +797,30 @@ ], "group": "geo" }, + "GEODIST": { + "summary": "Returns the distance between two members of a geospatial index", + "complexity": "O(log(N))", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "name": "member1", + "type": "string" + }, + { + "name": "member2", + "type": "string" + }, + { + "name": "unit", + "type": "string", + "optional": true + } + ], + "group": "geo" + }, "GEORADIUS": { "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", From e9ff302a56954a88387173e4d91c1546441d7161 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 12:09:03 +0200 Subject: [PATCH 0327/2314] Add geopos and geodist MD files. --- commands/geodist.md | 31 +++++++++++++++++++++++++++++++ commands/geopos.md | 22 ++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 commands/geodist.md create mode 100644 commands/geopos.md diff --git a/commands/geodist.md b/commands/geodist.md new file mode 100644 index 0000000000..af78cdff71 --- /dev/null +++ b/commands/geodist.md @@ -0,0 +1,31 @@ +Return the distance between two members in the geospatial index represented by the sorted set. + +Given a sorted set representing a geospatial index, populated using the `GEOADD` command, the command returns the distance between the two specified members in the specified unit. + +If one or both the members are missing, the command returns NULL. + +The unit must be one of the following, and defaults to meters: + +* **m** for meters. +* **km** for kilometers. +* **mi** for miles. 
+* **ft** for feet. + +The distance is computed assuming that the Earth is a perfect sphere, so errors up to 0.5% are possible in edge cases. + +@return + +@bulk-string-reply, specifically: + +The command returns the distance as a double (represented as a string) +in the specified unit, or NULL if one or both the elements are missing. + +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEODIST Sicily Palermo Catania +GEODIST Sicily Palermo Catania km +GEODIST Sicily Palermo Catania mi +GEODIST Sicily Foo Bar +``` diff --git a/commands/geopos.md b/commands/geopos.md new file mode 100644 index 0000000000..f0edbf74a2 --- /dev/null +++ b/commands/geopos.md @@ -0,0 +1,22 @@ +Return the positions (longitude,latitude) of all the speciied members of the geospatial index represented by the sorted set at *key*. + +Given a sorted set representing a geospatial index, populated using the `GEOADD` command, it is ofen useful to obtain back the coordinates of specified members. When the geospatial index is populated via `GEOADD` the coordinates are converted into a 52 bit geohash, so the coordinates returned may not be exactly the ones used in order to add the elements, but small errors may be introduced. + +The command can accept a variable number of arguments so it always returns an array of positions even when a signle element is specified. + +@return + +@array-reply, specifically: + +The command returns an array where each element is a two elements array +representing longitude and latitude (x,y) of each member name passed as +argument to the command. + +Non existing elements are reported as NULL elements of the array. 
+ +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEOPOS Sicily Palermo Catania NonExisting +``` From 0303d59ec481afba0cde863a52c78a1c1406bb4d Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 17:24:33 +0200 Subject: [PATCH 0328/2314] Mention sponsorship of the Redis CI hardware. --- topics/sponsors.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/sponsors.md b/topics/sponsors.md index 6171a00a52..14f9fcdf73 100644 --- a/topics/sponsors.md +++ b/topics/sponsors.md @@ -7,6 +7,7 @@ Before May 2013 the project was sponsored by VMware with the work of [Salvatore In the past Redis accepted donations from the following companies: +* [VMware](http://vmware.com) and later [Pivotal](http://pivotal.io) provided a 24 GB RAM workstation for me to run the [Redis CI test](http://ci.redis.io) and other long running tests. Later I (Salvatore) equipped the server with an SSD drive in order to test in the same hardware with rotating and flash drives. * [Linode](http://linode.com) 15 January 2010, provided Virtual Machines for Redis testing in a virtualized environment. * [Slicehost](http://slicehost.com) 14 January 2010, provided Virtual Machines for Redis testing in a virtualized environment. * [Citrusbyte](http://citrusbyte.com) 18 Dec 2009, part of Virtual Memory. Citrusbyte is also the company developing the Redis-rb bindings for Redis and this very web site. From 9e8694673fe8a2f3b99e24dcb63cca5054a4e8ea Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 9 Jul 2015 17:41:02 +0200 Subject: [PATCH 0329/2314] GEOENCODE / GEODECODE removed. 
--- commands.json | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/commands.json b/commands.json index 6060870562..eeedc2c259 100644 --- a/commands.json +++ b/commands.json @@ -728,43 +728,6 @@ ], "group": "geo" }, - "GEODECODE": { - "summary": "Decode a 52 bit geohash integer into a longitude, latitude pair", - "complexity": "O(1).", - "arguments": [ - { - "name": "integer", - "type": "integer" - } - ], - "group": "geo" - }, - "GEOENCODE": { - "summary": "Encode a longitude and latitude pair into a 52 bit geohash integer", - "complexity": "O(1).", - "arguments": [ - { - "name": "longitude", - "type": "double" - }, - { - "name": "latitude", - "type": "double" - }, - { - "name": "radius", - "type": "double", - "optional": true - }, - { - "name": "unit", - "type": "enum", - "enum": ["m", "km", "ft", "mi"], - "optional": true - } - ], - "group": "geo" - }, "GEOHASH": { "summary": "Returns members of a geospatial index as standard geohash strings", "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", From 5394bd94668af9106a245f6c27c2be5e74b1c94f Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 18:18:40 +0200 Subject: [PATCH 0330/2314] Case-sensitivity --- topics/benchmarks.md | 10 +++++----- topics/latency.md | 2 +- topics/rdd-1.md | 4 ++-- topics/rdd-2.md | 2 +- topics/releases.md | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/topics/benchmarks.md b/topics/benchmarks.md index 584e43dd0a..33a861ffca 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -205,14 +205,14 @@ untuned box usually provides good enough performance for most applications. It is a good practice to use the ping program to quickly check the latency between the client and server hosts is normal before launching the benchmark. 
Regarding the bandwidth, it is generally useful to estimate -the throughput in Gbits/s and compare it to the theoretical bandwidth +the throughput in Gbit/s and compare it to the theoretical bandwidth of the network. For instance a benchmark setting 4 KB strings -in Redis at 100000 q/s, would actually consume 3.2 Gbits/s of bandwidth -and probably fit within a 10 GBits/s link, but not a 1 Gbits/s one. In many real +in Redis at 100000 q/s, would actually consume 3.2 Gbit/s of bandwidth +and probably fit within a 10 Gbit/s link, but not a 1 Gbit/s one. In many real world scenarios, Redis throughput is limited by the network well before being limited by the CPU. To consolidate several high-throughput Redis instances -on a single server, it worth considering putting a 10 Gbits/s NIC -or multiple 1 Gbits/s NICs with TCP/IP bonding. +on a single server, it worth considering putting a 10 Gbit/s NIC +or multiple 1 Gbit/s NICs with TCP/IP bonding. + CPU is another very important factor. Being single-threaded, Redis favors fast CPUs with large caches and not many cores. At this game, Intel CPUs are currently the winners. It is not uncommon to get only half the performance on diff --git a/topics/latency.md b/topics/latency.md index f1476ec1de..53a07a975c 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -115,7 +115,7 @@ Latency induced by network and communication -------------------------------------------- Clients connect to Redis using a TCP/IP connection or a Unix domain connection. -The typical latency of a 1 GBits/s network is about 200 us, while the latency +The typical latency of a 1 Gbit/s network is about 200 us, while the latency with a Unix domain socket can be as low as 30 us. It actually depends on your network and system hardware. 
On top of the communication itself, the system adds some more latency (due to thread scheduling, CPU caches, NUMA placement, diff --git a/topics/rdd-1.md b/topics/rdd-1.md index 0ea8402aa3..823189b65e 100644 --- a/topics/rdd-1.md +++ b/topics/rdd-1.md @@ -1,7 +1,7 @@ # Redis Design Draft 1 -- Redis Design Drafts * Author: Salvatore Sanfilippo `antirez@gmail.com` -* Github issue: none +* GitHub issue: none ## History of revisions @@ -16,7 +16,7 @@ feedback before implementing a given feature. The way the community can provide feedback about a RDD is simply writing a message to the Redis mailing list, or commenting in the associated -Github issue if any. +GitHub issue if any. Drafts are published only for features already approved as potentially very interesting for the project by the current Redis project maintainer. diff --git a/topics/rdd-2.md b/topics/rdd-2.md index f1afc06bdf..919d0bc508 100644 --- a/topics/rdd-2.md +++ b/topics/rdd-2.md @@ -1,7 +1,7 @@ # Redis Design Draft 2 -- RDB version 7 info fields * Author: Salvatore Sanfilippo `antirez@gmail.com` -* Github issue [#1048](https://github.com/antirez/redis/issues/1048) +* GitHub issue [#1048](https://github.com/antirez/redis/issues/1048) ## History of revisions diff --git a/topics/releases.md b/topics/releases.md index a6c78d9086..7962b782ac 100644 --- a/topics/releases.md +++ b/topics/releases.md @@ -20,7 +20,7 @@ Unstable tree === The unstable version of Redis is always located in the `unstable` branch in -the [Redis Github Repository](http://github.com/antirez/redis). +the [Redis GitHub Repository](http://github.com/antirez/redis). 
This is the source tree where most of the new features are developed and is not considered to be production ready: it may contain critical bugs, From 747c611c267f8c8b9bb862e12b21795e4be870c0 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 18:18:50 +0200 Subject: [PATCH 0331/2314] Check for words by case --- Rakefile | 2 +- wordlist | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Rakefile b/Rakefile index 2a14ab2cb9..293a723b55 100644 --- a/Rakefile +++ b/Rakefile @@ -36,7 +36,7 @@ task :spellcheck do ruby -pe 'gsub /^ .*$/, ""' | ruby -pe 'gsub /`[^`]+`/, ""' | ruby -e 'puts $stdin.read.gsub(/\[([^\]]+)\]\(([^\)]+)\)/m, "\\1").gsub(/^```.*```/m, "")' | - aspell --lang=en --ignore-case -H -a --extra-dicts=./tmp/dict 2>/dev/null + aspell --lang=en -H -a --extra-dicts=./tmp/dict 2>/dev/null } words = `cat '#{file}' | #{command}`.lines.map do |line| diff --git a/wordlist b/wordlist index 1877d4bca9..8d68dbba82 100644 --- a/wordlist +++ b/wordlist @@ -24,16 +24,14 @@ Erlang EventMachine Facebook Fsyncing -GBits GCC GDB GETs GHz GPG -Gbits +Gbit Geospatial GitHub -Github Google HLL HLLs @@ -136,6 +134,7 @@ Wikipedia Xen Xeon Yukihiro +ZPOP addr afterwards allkeys @@ -157,6 +156,7 @@ bgsave bitwise blazingly blog +blpop boolean br bt @@ -412,4 +412,3 @@ vtype wikifs wildcards xff -ZPOP From a667c80ab774163727df76f6c9b259071fbb7692 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 18:22:30 +0200 Subject: [PATCH 0332/2314] Fix spelling mistakes --- commands/georadius.md | 2 +- topics/twitter-clone.md | 2 +- wordlist | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/commands/georadius.md b/commands/georadius.md index 329bfaa28e..14dbe4ba9f 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -1,6 +1,6 @@ Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified with the center location and the 
maximum distance from the center (the radius). -The common use case for this command is to retrieve geospatial items near ot a specified point and no far than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an applicaiton nearby places. +The common use case for this command is to retrieve geospatial items near ot a specified point and no far than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places. The radius is specified in one of the following units: diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index 01a1386a81..1378882066 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -433,7 +433,7 @@ You can find the code that sets or removes a following / follower relation in th Making it horizontally scalable --- -Gentle reader, if you reached this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware. +Gentle reader, if you reached this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an Apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware. 
However you can't go with a single server forever, how do you scale a key-value store? diff --git a/wordlist b/wordlist index 8d68dbba82..5a84b96f33 100644 --- a/wordlist +++ b/wordlist @@ -141,9 +141,7 @@ allkeys allocator antirez aof -apache appendfsync -applicaiton arity atomicity auth From 20fb124f635dd3e313dbf1b29ea74e9e5a48ab17 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 18:53:09 +0200 Subject: [PATCH 0333/2314] Cleanup wordlist --- wordlist | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/wordlist b/wordlist index 5a84b96f33..dcd3fc8937 100644 --- a/wordlist +++ b/wordlist @@ -57,7 +57,6 @@ Lua MERCHANTABILITY MX MacBook -Macbook Maxmemory Memcached MongoDB @@ -93,9 +92,10 @@ REdis REmote RSS RTT -RaiserFS Redis Redlock +Reiser +ReiserFS Reshard Retwis Runtime @@ -158,7 +158,6 @@ blpop boolean br bt -btw cardinalities cardinality cas @@ -190,10 +189,9 @@ dir diskless dormando earts -eg else's endian -endianess +endianness epoll ethernet everybody's @@ -211,7 +209,6 @@ filesystem firewalled firewalling flw -fo freenode fsync functionalities @@ -220,10 +217,8 @@ geo geohash geopositional geospatial -github globals gmail -hashs hbgc hewowu hgcarr @@ -237,7 +232,6 @@ incrementing indexable indices infeasible -informations init inline internet @@ -250,7 +244,6 @@ jemalloc jpeg js kB -kb keyN keyspace killable @@ -263,7 +256,6 @@ liveness lloogg localhost login -logitude logout lookup lookups @@ -312,7 +304,6 @@ prstat pubsub qsort queueing -radious rc rdb readonly @@ -388,7 +379,6 @@ unreachability unsubscribe unsubscribed unsubscribes -unsubscription untrusted untuned unwatch From 6461c2ab9231dfed0200ac22d77fefa2d6453d42 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 18:53:53 +0200 Subject: [PATCH 0334/2314] Multiple spelling fixes --- commands/cluster delslots.md | 2 +- commands/config get.md | 2 +- commands/config set.md | 2 +- commands/geoadd.md | 4 ++-- commands/geoencode.md 
| 4 ++-- commands/georadius.md | 2 +- commands/pfcount.md | 2 +- topics/benchmarks.md | 2 +- topics/cluster-tutorial.md | 2 +- topics/data-types.md | 2 +- topics/latency.md | 10 +++++----- topics/mass-insert.md | 2 +- topics/pubsub.md | 2 +- topics/sentinel-old.md | 2 +- topics/virtual-memory.md | 2 +- 15 files changed, 21 insertions(+), 21 deletions(-) diff --git a/commands/cluster delslots.md b/commands/cluster delslots.md index 2e5339b7c3..4e888a3f9d 100644 --- a/commands/cluster delslots.md +++ b/commands/cluster delslots.md @@ -7,7 +7,7 @@ forget which master is serving the hash slots specified as arguments. In the context of a node that has received a `DELSLOTS` command and has consequently removed the associations for the passed hash slots, we say those hash slots are *unbound*. Note that the existence of -unbound hashs slots occurs naturally when a node has not been +unbound hash slots occurs naturally when a node has not been configured to handle them (something that can be done with the `ADDSLOTS` command) and if it has not received any information about who owns those hash slots (something that it can learn from heartbeat diff --git a/commands/config get.md b/commands/config get.md index e527089955..c8b7259729 100644 --- a/commands/config get.md +++ b/commands/config get.md @@ -45,7 +45,7 @@ save 300 10 ``` that means, save after 900 seconds if there is at least 1 change to the dataset, -and after 300 seconds if there are at least 10 changes to the datasets, will be +and after 300 seconds if there are at least 10 changes to the dataset, will be reported by `CONFIG GET` as "900 1 300 10". 
@return diff --git a/commands/config set.md b/commands/config set.md index 85969a7968..42492ae4d3 100644 --- a/commands/config set.md +++ b/commands/config set.md @@ -33,7 +33,7 @@ save 300 10 ``` that means, save after 900 seconds if there is at least 1 change to the dataset, -and after 300 seconds if there are at least 10 changes to the datasets, should +and after 300 seconds if there are at least 10 changes to the dataset, should be set using `CONFIG SET SAVE "900 1 300 10"`. It is possible to switch persistence from RDB snapshotting to append-only file diff --git a/commands/geoadd.md b/commands/geoadd.md index 93f1950542..8c07c97add 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -1,4 +1,4 @@ -Adds the specified geospatial items (latitude, logitude, name) to the specified +Adds the specified geospatial items (latitude, longitude, name) to the specified key. Data is stored into the key as a sorted set, in a way that makes it possible to later retrieve items using a query by radius with the `GEORADIUS` or `GEOREDIUSBYMEMBER` commands. @@ -22,7 +22,7 @@ that a sorted set double score can represent a 52 bit integer without losing precision. This format allows for radius querying by checking the 1+8 areas needed -to cover the whole radius, and discarding elements outside the radious. +to cover the whole radius, and discarding elements outside the radius. The areas are checked by calculating the range of the box covered removing enough bits from the less significant part of the sorted set score, and computing the score range to query in the sorted set for each area. diff --git a/commands/geoencode.md b/commands/geoencode.md index a22117204c..3334e32121 100644 --- a/commands/geoencode.md +++ b/commands/geoencode.md @@ -7,9 +7,9 @@ Normally you don't need to use this command, unless you plan to implement low le 1. The longitude and latitude of a point on the Earth surface. 2. Optionally a radius represented by an integer and an unit. 
-And returns a set of informations, including the representation of the position as a 52 bit integer, the min and max corners of the bounding box represented by the geo hash, the center point in the area covered by the geohash integer, and finally the two sorted set scores to query in order to retrieve all the elements included in the geohash area. +And returns a set of information, including the representation of the position as a 52 bit integer, the min and max corners of the bounding box represented by the geo hash, the center point in the area covered by the geohash integer, and finally the two sorted set scores to query in order to retrieve all the elements included in the geohash area. -The radius optionally provided to the command is used in order to compute the two scores returned by the command fo range query purposes. Moreover the returned geohash integer will only have the most significant bits set, according to the number of bits needed to approximate the specified radius. +The radius optionally provided to the command is used in order to compute the two scores returned by the command for range query purposes. Moreover the returned geohash integer will only have the most significant bits set, according to the number of bits needed to approximate the specified radius. Use case --- diff --git a/commands/georadius.md b/commands/georadius.md index 14dbe4ba9f..5bb11c7c6f 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -29,7 +29,7 @@ By default all the matching items are returned. It is possible to limit the resu * Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. * If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. -When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. 
The other informations are returned in the following order as successive elements of the sub-array. +When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other information is returned in the following order as successive elements of the sub-array. 1. The distance from the center as a floating point number, in the same unit specified in the radius. 2. The geohash integer. diff --git a/commands/pfcount.md b/commands/pfcount.md index 967a405787..71d10930c0 100644 --- a/commands/pfcount.md +++ b/commands/pfcount.md @@ -56,6 +56,6 @@ Both representations are prefixed with a 16 bytes header, that includes a magic, The HyperLogLog, being a Redis string, can be retrieved with `GET` and restored with `SET`. Calling `PFADD`, `PFCOUNT` or `PFMERGE` commands with a corrupted HyperLogLog is never a problem, it may return random values but does not affect the stability of the server. Most of the times when corrupting a sparse representation, the server recognizes the corruption and returns an error. -The representation is neutral from the point of view of the processor word size and endianess, so the same representation is used by 32 bit and 64 bit processor, big endian or little endian. +The representation is neutral from the point of view of the processor word size and endianness, so the same representation is used by 32 bit and 64 bit processor, big endian or little endian. More details about the Redis HyperLogLog implementation can be found in [this blog post](http://antirez.com/news/75). The source code of the implementation in the `hyperloglog.c` file is also easy to read and understand, and includes a full specification for the exact encoding used for the sparse and dense representations. 
diff --git a/topics/benchmarks.md b/topics/benchmarks.md index 33a861ffca..2498e658e0 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -104,7 +104,7 @@ multiple commands at once, a feature often exploited by real world applications. Redis pipelining is able to dramatically improve the number of operations per second a server is able do deliver. -This is an example of running the benchmark in a Macbook air 11" using a +This is an example of running the benchmark in a MacBook Air 11" using a pipelining of 16 commands: $ redis-benchmark -n 1000000 -t set,get -P 16 -q diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index b30d538acd..6fa5d0c67b 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -98,7 +98,7 @@ to be part of the same hash slot by using a concept called *hash tags*. Hash tags are documented in the Redis Cluster specification, but the gist is that if there is a substring between {} brackets in a key, only what is -inside the string is hashed, so fo example `this{foo}key` and `another{foo}key` +inside the string is hashed, so for example `this{foo}key` and `another{foo}key` are guaranteed to be in the same hash slot, and can be used together in a command with multiple keys as arguments. diff --git a/topics/data-types.md b/topics/data-types.md index 534b2ad06c..f67c225c6b 100644 --- a/topics/data-types.md +++ b/topics/data-types.md @@ -87,7 +87,7 @@ As usually check the [full list of Set commands](/commands#set) for more informa Hashes --- -Redis Hashes are maps between string fields and string values, so they are the perfect data type to represent objects (eg: A User with a number of fields like name, surname, age, and so forth): +Redis Hashes are maps between string fields and string values, so they are the perfect data type to represent objects (e.g. 
A User with a number of fields like name, surname, age, and so forth): @cli HMSET user:1000 username antirez password P1pp0 age 34 diff --git a/topics/latency.md b/topics/latency.md index 53a07a975c..c421e848e5 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -216,11 +216,11 @@ Forking is an expensive operation on most Unix-like systems, since it involves copying a good number of objects linked to the process. This is especially true for the page table associated to the virtual memory mechanism. -For instance on a Linux/AMD64 system, the memory is divided in 4 KB pages. +For instance on a Linux/AMD64 system, the memory is divided in 4 kB pages. To convert virtual addresses to physical addresses, each process stores a page table (actually represented as a tree) containing at least a pointer per page of the address space of the process. So a large 24 GB Redis instance -requires a page table of 24 GB / 4 KB * 8 = 48 MB. +requires a page table of 24 GB / 4 kB * 8 = 48 MB. When a background save is performed, this instance will have to be forked, which will involve allocating and copying 48 MB of memory. It takes time @@ -349,7 +349,7 @@ to do is to grep for the Swap field across all the file: Swap: 0 kB Swap: 0 kB -If everything is 0 kb, or if there are sporadic 4k entries, everything is +If everything is 0 kB, or if there are sporadic 4k entries, everything is perfectly normal. Actually in our example instance (the one of a real web site running Redis and serving hundreds of users every second) there are a few entries that show more swapped pages. To investigate if this is a serious @@ -419,7 +419,7 @@ memory map: Swap: 0 kB As you can see from the output, there is a map of 720896 kB -(with just 12 kB swapped) and 156 kb more swapped in another map: +(with just 12 kB swapped) and 156 kB more swapped in another map: basically a very small amount of our memory is swapped so this is not going to create any problem at all. 
@@ -680,7 +680,7 @@ Unfortunately, and on top of the extra operational complexity, there is also a significant drawback of running Redis with huge pages. The COW mechanism granularity is the page. With 2 MB pages, the probability a page is modified during a background -save operation is 512 times higher than with 4 KB pages. The actual +save operation is 512 times higher than with 4 kB pages. The actual memory required for a background save therefore increases a lot, especially if the write traffic is truly random, with poor locality. With huge pages, using twice the memory while saving is not anymore diff --git a/topics/mass-insert.md b/topics/mass-insert.md index 6a7fa9c1c5..25a2b0d7ff 100644 --- a/topics/mass-insert.md +++ b/topics/mass-insert.md @@ -41,7 +41,7 @@ as fast as possible. In the past the way to do this was to use the However this is not a very reliable way to perform mass import because netcat does not really know when all the data was transferred and can't check for -errors. In the unstable branch of Redis at github the `redis-cli` utility +errors. In the unstable branch of Redis at GitHub the `redis-cli` utility supports a new mode called **pipe mode** that was designed in order to perform mass insertion. diff --git a/topics/pubsub.md b/topics/pubsub.md index 5a0bfb1e16..27c0f6cfe2 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -161,7 +161,7 @@ message types, the last argument is the count of subscriptions still active. This number is actually the total number of channels and patterns the client is still subscribed to. So the client will exit the Pub/Sub state only when this count drops to zero as a result of -unsubscription from all the channels and patterns. +unsubscribing from all the channels and patterns. 
## Programming example diff --git a/topics/sentinel-old.md b/topics/sentinel-old.md index f56fcec5ac..46c8178c91 100644 --- a/topics/sentinel-old.md +++ b/topics/sentinel-old.md @@ -27,7 +27,7 @@ Redis Sentinel is compatible with Redis 2.4.16 or greater, and redis 2.6.0-rc6 o Obtaining Sentinel --- -Currently Sentinel is part of the Redis *unstable* branch at github. +Currently Sentinel is part of the Redis *unstable* branch at GitHub. To compile it you need to clone the *unstable* branch and compile Redis. You'll see a `redis-sentinel` executable in your `src` directory. diff --git a/topics/virtual-memory.md b/topics/virtual-memory.md index 5dd69f9edb..6a3fd54d89 100644 --- a/topics/virtual-memory.md +++ b/topics/virtual-memory.md @@ -170,7 +170,7 @@ The recommendation is to use Linux ext3 file system, or any other file system with good support for *sparse files*. What are sparse files? Sparse files are files where a lot of the content happens to be empty. Advanced -file systems like ext2, ext3, ext4, RaiserFS, Raiser4, and many others, are +file systems like ext2, ext3, ext4, ReiserFS, Reiser4, and many others, are able to encode these files in a more efficient way and will allocate more space for the file when needed, that is, when more actual blocks of the file will be used. From 295c9d51b9b33589fb2643ea0cbd049fbd331558 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 18:59:30 +0200 Subject: [PATCH 0335/2314] Typos in geopos --- commands/geopos.md | 6 +++--- wordlist | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/commands/geopos.md b/commands/geopos.md index f0edbf74a2..19dd377f1c 100644 --- a/commands/geopos.md +++ b/commands/geopos.md @@ -1,8 +1,8 @@ -Return the positions (longitude,latitude) of all the speciied members of the geospatial index represented by the sorted set at *key*. +Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at *key*. 
-Given a sorted set representing a geospatial index, populated using the `GEOADD` command, it is ofen useful to obtain back the coordinates of specified members. When the geospatial index is populated via `GEOADD` the coordinates are converted into a 52 bit geohash, so the coordinates returned may not be exactly the ones used in order to add the elements, but small errors may be introduced. +Given a sorted set representing a geospatial index, populated using the `GEOADD` command, it is often useful to obtain back the coordinates of specified members. When the geospatial index is populated via `GEOADD` the coordinates are converted into a 52 bit geohash, so the coordinates returned may not be exactly the ones used in order to add the elements, but small errors may be introduced. -The command can accept a variable number of arguments so it always returns an array of positions even when a signle element is specified. +The command can accept a variable number of arguments so it always returns an array of positions even when a single element is specified. 
@return diff --git a/wordlist b/wordlist index dcd3fc8937..1e16e641fc 100644 --- a/wordlist +++ b/wordlist @@ -30,6 +30,7 @@ GETs GHz GPG Gbit +Geohashes Geospatial GitHub Google From 41e639081ba9b81bcd7c40117943d38806e6ad0c Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 19:03:41 +0200 Subject: [PATCH 0336/2314] Yes, it is a word --- wordlist | 1 + 1 file changed, 1 insertion(+) diff --git a/wordlist b/wordlist index 1e16e641fc..ce2dddee1a 100644 --- a/wordlist +++ b/wordlist @@ -380,6 +380,7 @@ unreachability unsubscribe unsubscribed unsubscribes +unsubscribing untrusted untuned unwatch From 4fe52125e0950804994686d03fdd933efaa842b5 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 9 Jul 2015 19:09:39 +0200 Subject: [PATCH 0337/2314] Use new container-based travis --- .travis.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 243168dfbb..2ddb297c82 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,13 @@ language: ruby +sudo: false +addons: + apt: + packages: + - aspell + - aspell-en rvm: - 1.9.3 gemfile: - .travis/Gemfile - -before_install: - - sudo apt-get install -y aspell aspell-en From 420adad8e9475ffc1c34569bb5968e049c4926e4 Mon Sep 17 00:00:00 2001 From: Eliot Shepard Date: Thu, 9 Jul 2015 14:09:54 -0400 Subject: [PATCH 0338/2314] Corrected "longitude" for "latitude", misc english usage --- commands/geoadd.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/commands/geoadd.md b/commands/geoadd.md index 470c4811cb..0665596bfc 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -1,18 +1,17 @@ Adds the specified geospatial items (latitude, longitude, name) to the specified -key. Data is stored into the key as a sorted set, in a way that makes it possible to later retrieve items using a query by radius with the `GEORADIUS` or -`GEORADIUSBYMEMBER` commands. +key. 
Data is stored into the key as a sorted set, in a way that makes it possible to later retrieve items using a query by radius with the `GEORADIUS` or `GEORADIUSBYMEMBER` commands. The command takes arguments in the standard format x,y so the longitude must be specified before the latitude. There are limits to the coordinates that can be indexed: areas very near to the poles are not indexable. The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: -* Valid latitudes are from -180 to 180 degrees. +* Valid longitudes are from -180 to 180 degrees. * Valid latitudes are from -85.05112878 to 85.05112878 degreees. The command will report an error when the user attempts to index coordinates outside the specified ranges. -How it works? +How does it work? --- The way the sorted set is populated is using a technique called @@ -27,7 +26,7 @@ The areas are checked by calculating the range of the box covered removing enough bits from the less significant part of the sorted set score, and computing the score range to query in the sorted set for each area. -What Earth model it uses? +What Earth model does it use? --- It just assumes that the Earth is a sphere, since the used distance formula @@ -39,7 +38,7 @@ and most other applications. However in th worst case the error may be up to @integer-reply, specifically: -* The number of elements added to the sorted sets, not including elements +* The number of elements added to the sorted set, not including elements already existing for which the score was updated. @examples From 5497129f087841a95f74b0afca29061f510c1667 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 11:36:53 -0300 Subject: [PATCH 0339/2314] Use Make for spell checking. 
--- .gems | 2 + .travis.yml | 4 +- .travis/Gemfile | 2 +- README.md | 27 ++++------ Rakefile | 49 ++----------------- bin/text | 39 +++++++++++++++ .../{client getname.md => client-getname.md} | 0 commands/{client kill.md => client-kill.md} | 0 commands/{client list.md => client-list.md} | 0 commands/{client pause.md => client-pause.md} | 0 .../{client setname.md => client-setname.md} | 0 ...luster addslots.md => cluster-addslots.md} | 0 ...ts.md => cluster-count-failure-reports.md} | 0 ...ysinslot.md => cluster-countkeysinslot.md} | 0 ...luster delslots.md => cluster-delslots.md} | 0 ...luster failover.md => cluster-failover.md} | 0 .../{cluster forget.md => cluster-forget.md} | 0 ...keysinslot.md => cluster-getkeysinslot.md} | 0 commands/{cluster info.md => cluster-info.md} | 0 ...{cluster keyslot.md => cluster-keyslot.md} | 0 commands/{cluster meet.md => cluster-meet.md} | 0 .../{cluster nodes.md => cluster-nodes.md} | 0 ...ster replicate.md => cluster-replicate.md} | 0 .../{cluster reset.md => cluster-reset.md} | 0 ...er saveconfig.md => cluster-saveconfig.md} | 0 ...g-epoch.md => cluster-set-config-epoch.md} | 0 ...{cluster setslot.md => cluster-setslot.md} | 0 .../{cluster slaves.md => cluster-slaves.md} | 0 .../{cluster slots.md => cluster-slots.md} | 0 .../{command count.md => command-count.md} | 0 ...{command getkeys.md => command-getkeys.md} | 0 commands/{command info.md => command-info.md} | 0 commands/{config get.md => config-get.md} | 0 ...onfig resetstat.md => config-resetstat.md} | 0 .../{config rewrite.md => config-rewrite.md} | 0 commands/{config set.md => config-set.md} | 0 commands/{debug object.md => debug-object.md} | 0 .../{debug segfault.md => debug-segfault.md} | 0 commands/geohash.md | 2 +- .../{script exists.md => script-exists.md} | 0 commands/{script flush.md => script-flush.md} | 0 commands/{script kill.md => script-kill.md} | 0 commands/{script load.md => script-load.md} | 0 makefile | 35 +++++++++++-- 44 files changed, 90 
insertions(+), 70 deletions(-) create mode 100644 .gems create mode 100755 bin/text rename commands/{client getname.md => client-getname.md} (100%) rename commands/{client kill.md => client-kill.md} (100%) rename commands/{client list.md => client-list.md} (100%) rename commands/{client pause.md => client-pause.md} (100%) rename commands/{client setname.md => client-setname.md} (100%) rename commands/{cluster addslots.md => cluster-addslots.md} (100%) rename commands/{cluster count-failure-reports.md => cluster-count-failure-reports.md} (100%) rename commands/{cluster countkeysinslot.md => cluster-countkeysinslot.md} (100%) rename commands/{cluster delslots.md => cluster-delslots.md} (100%) rename commands/{cluster failover.md => cluster-failover.md} (100%) rename commands/{cluster forget.md => cluster-forget.md} (100%) rename commands/{cluster getkeysinslot.md => cluster-getkeysinslot.md} (100%) rename commands/{cluster info.md => cluster-info.md} (100%) rename commands/{cluster keyslot.md => cluster-keyslot.md} (100%) rename commands/{cluster meet.md => cluster-meet.md} (100%) rename commands/{cluster nodes.md => cluster-nodes.md} (100%) rename commands/{cluster replicate.md => cluster-replicate.md} (100%) rename commands/{cluster reset.md => cluster-reset.md} (100%) rename commands/{cluster saveconfig.md => cluster-saveconfig.md} (100%) rename commands/{cluster set-config-epoch.md => cluster-set-config-epoch.md} (100%) rename commands/{cluster setslot.md => cluster-setslot.md} (100%) rename commands/{cluster slaves.md => cluster-slaves.md} (100%) rename commands/{cluster slots.md => cluster-slots.md} (100%) rename commands/{command count.md => command-count.md} (100%) rename commands/{command getkeys.md => command-getkeys.md} (100%) rename commands/{command info.md => command-info.md} (100%) rename commands/{config get.md => config-get.md} (100%) rename commands/{config resetstat.md => config-resetstat.md} (100%) rename commands/{config rewrite.md => 
config-rewrite.md} (100%) rename commands/{config set.md => config-set.md} (100%) rename commands/{debug object.md => debug-object.md} (100%) rename commands/{debug segfault.md => debug-segfault.md} (100%) rename commands/{script exists.md => script-exists.md} (100%) rename commands/{script flush.md => script-flush.md} (100%) rename commands/{script kill.md => script-kill.md} (100%) rename commands/{script load.md => script-load.md} (100%) diff --git a/.gems b/.gems new file mode 100644 index 0000000000..6f5dc2c08e --- /dev/null +++ b/.gems @@ -0,0 +1,2 @@ +batch -v 1.0.4 +redcarpet -v 3.3.2 diff --git a/.travis.yml b/.travis.yml index 2ddb297c82..0d587a6957 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,9 @@ addons: - aspell-en rvm: - - 1.9.3 + - 2.2 + +script: make gemfile: - .travis/Gemfile diff --git a/.travis/Gemfile b/.travis/Gemfile index 6c9643ddf3..5691697595 100644 --- a/.travis/Gemfile +++ b/.travis/Gemfile @@ -2,4 +2,4 @@ source "https://rubygems.org" gem "rake" gem "batch" -gem "rdiscount" +gem "redcarpet" diff --git a/README.md b/README.md index d51fb29f99..550d5f76c7 100644 --- a/README.md +++ b/README.md @@ -103,30 +103,21 @@ sudo apt-get install par ## Checking your work -Once you're done, the very least you should do is make sure that all files -compile properly. -You can do this by running Rake inside your working directory. +You should check your changes using Make: ``` -$ rake parse +$ make ``` -The parse task has the following dependencies: +This will make sure that JSON and Markdown files compile and that all +text files have no typos. -* batch -* rdiscount +You need to install a few Ruby gems and [Aspell][han] to run these checks. +The gems are listed in the `.gems` file. 
Install them with the +following command: ``` -gem install batch rdiscount +$ gem install $(sed -e 's/ -v /:/' .gems) ``` -Additionally, if you have [Aspell][han] installed, you can spell check the -documentation: - -[han]: http://aspell.net/ - -``` -$ rake spellcheck -``` - -Exceptions can be added to `./wordlist`. +The spell checking exceptions should be added to `./wordlist`. diff --git a/Rakefile b/Rakefile index 293a723b55..44e2faaa8b 100644 --- a/Rakefile +++ b/Rakefile @@ -1,55 +1,12 @@ -task :default => [:parse, :spellcheck] +task :default => [:parse] task :parse do require "json" require "batch" - require "rdiscount" - Batch.each(Dir["**/*.json"] + Dir["**/*.md"]) do |file| - if File.extname(file) == ".md" - RDiscount.new(File.read(file)).to_html - else - JSON.parse(File.read(file)) - end - end -end - -task :spellcheck do - require "json" - - `mkdir -p tmp` - - IO.popen("aspell --lang=en create master ./tmp/dict", "w") do |io| - words = JSON.parse(File.read("commands.json")). - keys. - map { |str| str.split(/[ -]/) }. 
- flatten(1) - - io.puts(words.join("\n")) - io.puts(File.read("wordlist")) + Batch.each(Dir["**/*.json"]) do |file| + JSON.parse(File.read(file)) end - - errors = false - - Dir["**/*.md"].each do |file| - command = %q{ - ruby -pe 'gsub /^ .*$/, ""' | - ruby -pe 'gsub /`[^`]+`/, ""' | - ruby -e 'puts $stdin.read.gsub(/\[([^\]]+)\]\(([^\)]+)\)/m, "\\1").gsub(/^```.*```/m, "")' | - aspell --lang=en -H -a --extra-dicts=./tmp/dict 2>/dev/null - } - - words = `cat '#{file}' | #{command}`.lines.map do |line| - line[/^& ([^ ]+)/, 1] - end.compact - - if words.size > 0 - errors = true - puts("#{file}: #{words.uniq.sort.join(" ")}") - end - end - - abort("Spelling errors found.") if errors end namespace :format do diff --git a/bin/text b/bin/text new file mode 100755 index 0000000000..ca25fa4f60 --- /dev/null +++ b/bin/text @@ -0,0 +1,39 @@ +#!/usr/bin/env ruby + +require "redcarpet" +require "redcarpet/render_strip" + +class Renderer < Redcarpet::Render::StripDown + def link(link, title, content) + content + end + + def image(link, title, content) + content + end + + def block_code(*args) + "" + end + + def codespan(*args) + "" + end + + def block_html(*args) + "" + end + + def raw_html(*args) + "" + end +end + +engine = Redcarpet::Markdown.new( + Renderer.new, + no_intra_emphasis: true, + fenced_code_blocks: true, + superscript: true +) + +puts(engine.render(File.read(ARGV[0]))) diff --git a/commands/client getname.md b/commands/client-getname.md similarity index 100% rename from commands/client getname.md rename to commands/client-getname.md diff --git a/commands/client kill.md b/commands/client-kill.md similarity index 100% rename from commands/client kill.md rename to commands/client-kill.md diff --git a/commands/client list.md b/commands/client-list.md similarity index 100% rename from commands/client list.md rename to commands/client-list.md diff --git a/commands/client pause.md b/commands/client-pause.md similarity index 100% rename from commands/client pause.md 
rename to commands/client-pause.md diff --git a/commands/client setname.md b/commands/client-setname.md similarity index 100% rename from commands/client setname.md rename to commands/client-setname.md diff --git a/commands/cluster addslots.md b/commands/cluster-addslots.md similarity index 100% rename from commands/cluster addslots.md rename to commands/cluster-addslots.md diff --git a/commands/cluster count-failure-reports.md b/commands/cluster-count-failure-reports.md similarity index 100% rename from commands/cluster count-failure-reports.md rename to commands/cluster-count-failure-reports.md diff --git a/commands/cluster countkeysinslot.md b/commands/cluster-countkeysinslot.md similarity index 100% rename from commands/cluster countkeysinslot.md rename to commands/cluster-countkeysinslot.md diff --git a/commands/cluster delslots.md b/commands/cluster-delslots.md similarity index 100% rename from commands/cluster delslots.md rename to commands/cluster-delslots.md diff --git a/commands/cluster failover.md b/commands/cluster-failover.md similarity index 100% rename from commands/cluster failover.md rename to commands/cluster-failover.md diff --git a/commands/cluster forget.md b/commands/cluster-forget.md similarity index 100% rename from commands/cluster forget.md rename to commands/cluster-forget.md diff --git a/commands/cluster getkeysinslot.md b/commands/cluster-getkeysinslot.md similarity index 100% rename from commands/cluster getkeysinslot.md rename to commands/cluster-getkeysinslot.md diff --git a/commands/cluster info.md b/commands/cluster-info.md similarity index 100% rename from commands/cluster info.md rename to commands/cluster-info.md diff --git a/commands/cluster keyslot.md b/commands/cluster-keyslot.md similarity index 100% rename from commands/cluster keyslot.md rename to commands/cluster-keyslot.md diff --git a/commands/cluster meet.md b/commands/cluster-meet.md similarity index 100% rename from commands/cluster meet.md rename to 
commands/cluster-meet.md diff --git a/commands/cluster nodes.md b/commands/cluster-nodes.md similarity index 100% rename from commands/cluster nodes.md rename to commands/cluster-nodes.md diff --git a/commands/cluster replicate.md b/commands/cluster-replicate.md similarity index 100% rename from commands/cluster replicate.md rename to commands/cluster-replicate.md diff --git a/commands/cluster reset.md b/commands/cluster-reset.md similarity index 100% rename from commands/cluster reset.md rename to commands/cluster-reset.md diff --git a/commands/cluster saveconfig.md b/commands/cluster-saveconfig.md similarity index 100% rename from commands/cluster saveconfig.md rename to commands/cluster-saveconfig.md diff --git a/commands/cluster set-config-epoch.md b/commands/cluster-set-config-epoch.md similarity index 100% rename from commands/cluster set-config-epoch.md rename to commands/cluster-set-config-epoch.md diff --git a/commands/cluster setslot.md b/commands/cluster-setslot.md similarity index 100% rename from commands/cluster setslot.md rename to commands/cluster-setslot.md diff --git a/commands/cluster slaves.md b/commands/cluster-slaves.md similarity index 100% rename from commands/cluster slaves.md rename to commands/cluster-slaves.md diff --git a/commands/cluster slots.md b/commands/cluster-slots.md similarity index 100% rename from commands/cluster slots.md rename to commands/cluster-slots.md diff --git a/commands/command count.md b/commands/command-count.md similarity index 100% rename from commands/command count.md rename to commands/command-count.md diff --git a/commands/command getkeys.md b/commands/command-getkeys.md similarity index 100% rename from commands/command getkeys.md rename to commands/command-getkeys.md diff --git a/commands/command info.md b/commands/command-info.md similarity index 100% rename from commands/command info.md rename to commands/command-info.md diff --git a/commands/config get.md b/commands/config-get.md similarity index 100% 
rename from commands/config get.md rename to commands/config-get.md diff --git a/commands/config resetstat.md b/commands/config-resetstat.md similarity index 100% rename from commands/config resetstat.md rename to commands/config-resetstat.md diff --git a/commands/config rewrite.md b/commands/config-rewrite.md similarity index 100% rename from commands/config rewrite.md rename to commands/config-rewrite.md diff --git a/commands/config set.md b/commands/config-set.md similarity index 100% rename from commands/config set.md rename to commands/config-set.md diff --git a/commands/debug object.md b/commands/debug-object.md similarity index 100% rename from commands/debug object.md rename to commands/debug-object.md diff --git a/commands/debug segfault.md b/commands/debug-segfault.md similarity index 100% rename from commands/debug segfault.md rename to commands/debug-segfault.md diff --git a/commands/geohash.md b/commands/geohash.md index 2517c3f28d..5098609a10 100644 --- a/commands/geohash.md +++ b/commands/geohash.md @@ -4,7 +4,7 @@ Normally Redis represents positions of elements using a variation of the Geohash technique where positions are encoded using 52 bit integers. The encoding is also different compared to the standard because the initial min and max coordinates used during the encoding and decoding process are different. This -command however **returns a standd Geohash** in the form of a string as +command however **returns a standard Geohash** in the form of a string as described in the [Wikipedia article](https://en.wikipedia.org/wiki/Geohash) and compatible with the [geohash.org](http://geohash.org) web site.
Geohash string properties diff --git a/commands/script exists.md b/commands/script-exists.md similarity index 100% rename from commands/script exists.md rename to commands/script-exists.md diff --git a/commands/script flush.md b/commands/script-flush.md similarity index 100% rename from commands/script flush.md rename to commands/script-flush.md diff --git a/commands/script kill.md b/commands/script-kill.md similarity index 100% rename from commands/script kill.md rename to commands/script-kill.md diff --git a/commands/script load.md b/commands/script-load.md similarity index 100% rename from commands/script load.md rename to commands/script-load.md diff --git a/makefile b/makefile index 5f1697004d..71aa80c94f 100644 --- a/makefile +++ b/makefile @@ -1,4 +1,33 @@ -clients: .PHONY - ruby -rjson -r./utils/clients -e 'Clients.check(JSON.parse(File.read("clients.json"), symbolize_names: true))' +MD_FILES:=$(shell find {commands,topics} -name '*.md') +TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) +SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) -.PHONY: +spell: tmp/commands tmp/topics $(SPELL_FILES) + find tmp -name '*.spell' | xargs cat > tmp/all.spell + cat tmp/all.spell + test -s tmp/all.spell && exit 1 + +$(TEXT_FILES): tmp/%.txt: %.md + ./bin/text $< > $@ + +$(SPELL_FILES): %.spell: %.txt tmp/dict + aspell -a --extra-dicts=./tmp/dict 2>/dev/null < $< | \ + awk -v FILE=$(patsubst tmp/%.spell,%.md,$@) '/^\&/ { print FILE, $$2 }' | \ + sort -f | uniq > $@ + +tmp/commands: + mkdir -p tmp/commands + +tmp/topics: + mkdir -p tmp/topics + +tmp/commands.txt: commands.json + ruby -rjson -e 'puts JSON.parse(File.read("$<")).keys.map { |str| str.split(/[ -]/) }.flatten(1)' > $@ + +tmp/dict: wordlist tmp/commands.txt + cat $^ | aspell --lang=en create master ./$@ + +clean: + rm -rf tmp/* + +.PHONY: clean From 024b29705db3d8d178b78b67878c9aedf2cd6093 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 10 Jul 2015 17:53:08 +0200 Subject: [PATCH 0340/2314] Correct 
SoundCloud --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index fbecd42631..2cee8d3228 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -1073,7 +1073,7 @@ replication and the discarding nature of the "virtual" merge function of the sys 1. Use synchronous replication (and a proper consensus algorithm to run a replicated state machine). 2. Use an eventually consistent system where different versions of the same object can be merged. -Redis currently is not able to use any of the above systems, and is currently outside the development goals. However there are proxies implementing solution "2" on top of Redis stores such as SoundClound [Roshi](https://github.com/soundcloud/roshi), or Netflix [Dynomite](https://github.com/Netflix/dynomite). +Redis currently is not able to use any of the above systems, and is currently outside the development goals. However there are proxies implementing solution "2" on top of Redis stores such as SoundCloud [Roshi](https://github.com/soundcloud/roshi), or Netflix [Dynomite](https://github.com/Netflix/dynomite). 
Sentinel persistent state --- From 27d006981c7805615596c8044aa0c236be5fee84 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 10 Jul 2015 18:00:55 +0200 Subject: [PATCH 0341/2314] Correct it in the wordlist as well --- wordlist | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wordlist b/wordlist index ce2dddee1a..5d2b70cfd3 100644 --- a/wordlist +++ b/wordlist @@ -116,7 +116,7 @@ SSL Sanfilippo SmartOS Solaris -SoundClound +SoundCloud SystemTimer TCP TLB From 6742e2e2f3e8200bb11a2c9e9c85eccfc1f1ebd8 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 10 Jul 2015 18:02:14 +0200 Subject: [PATCH 0342/2314] Corrected wordlist --- wordlist | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/wordlist b/wordlist index ce2dddee1a..760d39ad8b 100644 --- a/wordlist +++ b/wordlist @@ -76,7 +76,6 @@ Noordhuis ODOWN OOM OSGEO -Ok Opteron PHP PINGs @@ -157,12 +156,9 @@ blazingly blog blpop boolean -br -bt cardinalities cardinality cas -cb cgroups changelogs checksum @@ -176,7 +172,6 @@ cpu cron dataset datasets -de decrement decrementing denyoom @@ -204,25 +199,18 @@ failover failovers fanout fdatasync -fea -fermurb filesystem firewalled firewalling -flw freenode fsync functionalities -gb geo geohash geopositional geospatial globals gmail -hbgc -hewowu -hgcarr hiredis hostname htop @@ -236,14 +224,11 @@ infeasible init inline internet -io iojob iostat -ip ists jemalloc jpeg -js kB keyN keyspace @@ -284,9 +269,7 @@ newjobs noeviction noscript numactl -ok online -ot overcommit pades pageview @@ -305,7 +288,6 @@ prstat pubsub qsort queueing -rc rdb readonly realtime @@ -334,15 +316,12 @@ rss runid runtime scalable -se semantical sharding -si sismember slowlog smaps snapshotting -src startup strace subcommand @@ -353,22 +332,14 @@ swappability syscall taskset tcmalloc -tdtl -tdts -tdtss techstacks -th timeline timestamp -tp tpaof tradeoff tradeoffs transactional trib -tt -ttc -ttl tuple tuples unary @@ -396,9 +367,6 @@ 
versioned versioning virtualization virtualized -vm vmstat vtype -wikifs wildcards -xff From c0c8b60bac94353fc16adea005cefa0504b15dee Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 14:46:05 -0300 Subject: [PATCH 0343/2314] Fix Gemfile. --- .travis/Gemfile.lock | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.travis/Gemfile.lock b/.travis/Gemfile.lock index 6b833d174d..e0b553c524 100644 --- a/.travis/Gemfile.lock +++ b/.travis/Gemfile.lock @@ -3,7 +3,7 @@ GEM specs: batch (0.0.3) rake (0.9.2.2) - rdiscount (1.6.8) + redcarpet (3.3.2) PLATFORMS ruby @@ -11,4 +11,7 @@ PLATFORMS DEPENDENCIES batch rake - rdiscount + redcarpet + +BUNDLED WITH + 1.10.5 From bf6220b3ebc8a0286236bcc3bd71b6cf5637acd3 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 10 Jul 2015 18:16:22 +0200 Subject: [PATCH 0344/2314] Don't use shell globbing --- makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefile b/makefile index 71aa80c94f..b5381ce6e6 100644 --- a/makefile +++ b/makefile @@ -1,4 +1,4 @@ -MD_FILES:=$(shell find {commands,topics} -name '*.md') +MD_FILES:=$(shell find commands topics -name '*.md') TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) From 4ebfc76c295a095dfea35bb63ed2f67f763fb3a7 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 14:51:19 -0300 Subject: [PATCH 0345/2314] Succeed when no spelling errors are found. --- makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefile b/makefile index b5381ce6e6..bd480bc2c8 100644 --- a/makefile +++ b/makefile @@ -5,7 +5,7 @@ SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) spell: tmp/commands tmp/topics $(SPELL_FILES) find tmp -name '*.spell' | xargs cat > tmp/all.spell cat tmp/all.spell - test -s tmp/all.spell && exit 1 + test ! 
-s tmp/all.spell $(TEXT_FILES): tmp/%.txt: %.md ./bin/text $< > $@ From b1a45b37fe202ff103dad2cd039572ba7b8a6abb Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 14:52:51 -0300 Subject: [PATCH 0346/2314] Use a different name for the combined errors file. --- makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/makefile b/makefile index bd480bc2c8..1c8ffcee4d 100644 --- a/makefile +++ b/makefile @@ -3,9 +3,9 @@ TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) spell: tmp/commands tmp/topics $(SPELL_FILES) - find tmp -name '*.spell' | xargs cat > tmp/all.spell - cat tmp/all.spell - test ! -s tmp/all.spell + find tmp -name '*.spell' | xargs cat > tmp/spelling-errors + cat tmp/spelling-errors + test ! -s tmp/spelling-errors $(TEXT_FILES): tmp/%.txt: %.md ./bin/text $< > $@ From 435859c37c9cc9d2d84fcd625f97e49d4221324f Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 15:21:47 -0300 Subject: [PATCH 0347/2314] More spelling and formatting fixes. --- commands/bitop.md | 8 ++++---- commands/bitpos.md | 2 +- commands/blpop.md | 2 +- commands/client-kill.md | 2 +- commands/client-list.md | 40 +++++++++++++++++++-------------------- commands/cluster-meet.md | 6 +++--- commands/cluster-nodes.md | 22 ++++++++++++--------- commands/config-get.md | 2 +- commands/eval.md | 20 ++++++++++---------- wordlist | 2 ++ 10 files changed, 56 insertions(+), 50 deletions(-) diff --git a/commands/bitop.md b/commands/bitop.md index 53b2638cf0..1f16dc88f6 100644 --- a/commands/bitop.md +++ b/commands/bitop.md @@ -4,10 +4,10 @@ store the result in the destination key. The `BITOP` command supports four bitwise operations: **AND**, **OR**, **XOR** and **NOT**, thus the valid forms to call the command are: -* BITOP AND _destkey srckey1 srckey2 srckey3 ... srckeyN_ -* BITOP OR _destkey srckey1 srckey2 srckey3 ... srckeyN_ -* BITOP XOR _destkey srckey1 srckey2 srckey3 ... 
srckeyN_ -* BITOP NOT _destkey srckey_ +* `BITOP AND _destkey srckey1 srckey2 srckey3 ... srckeyN_` +* `BITOP OR _destkey srckey1 srckey2 srckey3 ... srckeyN_` +* `BITOP XOR _destkey srckey1 srckey2 srckey3 ... srckeyN_` +* `BITOP NOT _destkey srckey_` As you can see **NOT** is special as it only takes an input key, because it performs inversion of bits so it only makes sense as an unary operator. diff --git a/commands/bitpos.md b/commands/bitpos.md index 85f406e5b0..ad645a09f6 100644 --- a/commands/bitpos.md +++ b/commands/bitpos.md @@ -25,7 +25,7 @@ The command returns the position of the first bit set to 1 or 0 according to the If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. -If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value 0xff the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. +If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value `0xff` the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. Basically, the function considers the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**. 
diff --git a/commands/blpop.md b/commands/blpop.md index e20ae5f583..b0777f9e2f 100644 --- a/commands/blpop.md +++ b/commands/blpop.md @@ -63,7 +63,7 @@ If the above condition happens using a Redis 2.6 server or greater, Client **A** Instead Redis 2.4 works in a different way: clients are served *in the context* of the push operation, so as long as `LPUSH foo a b c` starts pushing the first element to the list, it will be delivered to the Client **A**, that will receive `a` (the first element pushed). -The behavior of Redis 2.4 creates a lot of problems when replicating or persisting data into the AOF file, so the much more generic and semantically simpler behaviour was introduced into Redis 2.6 to prevent problems. +The behavior of Redis 2.4 creates a lot of problems when replicating or persisting data into the AOF file, so the much more generic and semantically simpler behavior was introduced into Redis 2.6 to prevent problems. Note that for the same reason a Lua script or a `MULTI/EXEC` block may push elements into a list and afterward **delete the list**. In this case the blocked clients will not be served at all and will continue to be blocked as long as no data is present on the list after the execution of a single command, transaction, or script. diff --git a/commands/client-kill.md b/commands/client-kill.md index 6e6290fc43..72d8470ffb 100644 --- a/commands/client-kill.md +++ b/commands/client-kill.md @@ -2,7 +2,7 @@ The `CLIENT KILL` command closes a given client connection. Up to Redis 2.8.11 i CLIENT KILL addr:port -The ip:port should match a line returned by the `CLIENT LIST` command (`addr` field). +The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr` field). 
However starting with Redis 2.8.12 or greater, the command accepts the following form: diff --git a/commands/client-list.md b/commands/client-list.md index b507666cb6..4279cf3c9a 100644 --- a/commands/client-list.md +++ b/commands/client-list.md @@ -5,29 +5,29 @@ connections server in a mostly human readable format. @bulk-string-reply: a unique string, formatted as follows: -* One client connection per line (separated by LF) -* Each line is composed of a succession of property=value fields separated - by a space character. +* One client connection per line (separated by LF) +* Each line is composed of a succession of `property=value` fields separated + by a space character. Here is the meaning of the fields: -* id: an unique 64-bit client ID (introduced in Redis 2.8.12). -* addr: address/port of the client -* fd: file descriptor corresponding to the socket -* age: total duration of the connection in seconds -* idle: idle time of the connection in seconds -* flags: client flags (see below) -* db: current database ID -* sub: number of channel subscriptions -* psub: number of pattern matching subscriptions -* multi: number of commands in a MULTI/EXEC context -* qbuf: query buffer length (0 means no query pending) -* qbuf-free: free space of the query buffer (0 means the buffer is full) -* obl: output buffer length -* oll: output list length (replies are queued in this list when the buffer is full) -* omem: output buffer memory usage -* events: file descriptor events (see below) -* cmd: last command played +* `id`: an unique 64-bit client ID (introduced in Redis 2.8.12). 
+* `addr`: address/port of the client +* `fd`: file descriptor corresponding to the socket +* `age`: total duration of the connection in seconds +* `idle`: idle time of the connection in seconds +* `flags`: client flags (see below) +* `db`: current database ID +* `sub`: number of channel subscriptions +* `psub`: number of pattern matching subscriptions +* `multi`: number of commands in a MULTI/EXEC context +* `qbuf`: query buffer length (0 means no query pending) +* `qbuf-free`: free space of the query buffer (0 means the buffer is full) +* `obl`: output buffer length +* `oll`: output list length (replies are queued in this list when the buffer is full) +* `omem`: output buffer memory usage +* `events`: file descriptor events (see below) +* `cmd`: last command played The client flags can be a combination of: diff --git a/commands/cluster-meet.md b/commands/cluster-meet.md index c7f53a58b7..ee0bb9a97a 100644 --- a/commands/cluster-meet.md +++ b/commands/cluster-meet.md @@ -18,9 +18,9 @@ So, if we link node A with node B via `CLUSTER MEET`, and B with C, A and C will Another example: if we imagine a cluster formed of the following four nodes called A, B, C and D, we may send just the following set of commands to A: -1. CLUSTER MEET B-ip B-port -2. CLUSTER MEET C-ip C-port -3. CLUSTER MEET D-ip D-port +1. `CLUSTER MEET B-ip B-port` +2. `CLUSTER MEET C-ip C-port` +3. `CLUSTER MEET D-ip D-port` As a side effect of `A` knowing and being known by all the other nodes, it will send gossip sections in the heartbeat packets that will allow each other node to create a link with each other one, forming a full mesh in a matter of seconds, even if the cluster is large. diff --git a/commands/cluster-nodes.md b/commands/cluster-nodes.md index 9d318bea25..d48b5635a2 100644 --- a/commands/cluster-nodes.md +++ b/commands/cluster-nodes.md @@ -20,16 +20,20 @@ The output of the command is just a space-separated CSV string, where each line represents a node in the cluster. 
The following is an example of output: -* 07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected -* 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 -* 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 -* 6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected -* 824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected -* e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 +``` +07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected +67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002 master - 0 1426238316232 2 connected 5461-10922 +292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003 master - 0 1426238318243 3 connected 10923-16383 +6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected +824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected +e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001 myself,master - 0 0 1 connected 0-5460 +``` Each line is composed of the following fields: -`` `` `` `` `` `` `` `` `` `` `...` `` +``` + ... +``` The meaning of each filed is the following: @@ -48,8 +52,8 @@ Meaning of the flags (field number 3): * **myself** The node you are contacting. * **master** Node is a master. * **slave** Node is a slave. -* **fail?** Node is in PFAIL state. Not reachable for the node you are contacting, but still logically reachable (not in FAIL state). -* **fail** Node is in FAIL state. 
It was not reachable for multiple nodes that promoted the PFAIL state to FAIL. +* **fail?** Node is in `PFAIL` state. Not reachable for the node you are contacting, but still logically reachable (not in `FAIL` state). +* **fail** Node is in `FAIL` state. It was not reachable for multiple nodes that promoted the `PFAIL` state to `FAIL`. * **handshake** Untrusted node, we are handshaking. * **noaddr** No address known for this node. * **noflags** No flags at all. diff --git a/commands/config-get.md b/commands/config-get.md index c8b7259729..c498e00e09 100644 --- a/commands/config-get.md +++ b/commands/config-get.md @@ -31,7 +31,7 @@ following important differences: [hgcarr22rc]: http://github.com/antirez/redis/raw/2.8/redis.conf * Where bytes or other quantities are specified, it is not possible to use - the `redis.conf` abbreviated form (10k 2gb ... and so forth), everything + the `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth), everything should be specified as a well-formed 64-bit integer, in the base unit of the configuration directive. * The save parameter is a single string of space-separated integers. diff --git a/commands/eval.md b/commands/eval.md index 8e9b9953a0..28a84d1ff3 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -500,16 +500,16 @@ was the cause of bugs. The Redis Lua interpreter loads the following Lua libraries: -* base lib. -* table lib. -* string lib. -* math lib. -* debug lib. -* struct lib. -* cjson lib. -* cmsgpack lib. -* bitop lib -* redis.sha1hex function. +* `base` lib. +* `table` lib. +* `string` lib. +* `math` lib. +* `debug` lib. +* `struct` lib. +* `cjson` lib. +* `cmsgpack` lib. +* `bitop` lib. +* `redis.sha1hex` function. Every Redis instance is _guaranteed_ to have all the above libraries so you can be sure that the environment for your Redis scripts is always the same. 
diff --git a/wordlist b/wordlist index 760d39ad8b..87535a3b8a 100644 --- a/wordlist +++ b/wordlist @@ -151,6 +151,7 @@ backtrace benchmarked benchmarking bgsave +bitop bitwise blazingly blog @@ -335,6 +336,7 @@ tcmalloc techstacks timeline timestamp +timestamps tpaof tradeoff tradeoffs From 30390a86fb3cd6b5f354e06c94e9623384425299 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 15:29:42 -0300 Subject: [PATCH 0348/2314] Try installing gems ourselves. --- .travis.yml | 6 +++--- .travis/Gemfile | 5 ----- .travis/Gemfile.lock | 17 ----------------- 3 files changed, 3 insertions(+), 25 deletions(-) delete mode 100644 .travis/Gemfile delete mode 100644 .travis/Gemfile.lock diff --git a/.travis.yml b/.travis.yml index 0d587a6957..bd18461522 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,7 +9,7 @@ addons: rvm: - 2.2 -script: make +install: + - gem install $(sed -e 's/ -v /:/' .gems) -gemfile: - - .travis/Gemfile +script: make diff --git a/.travis/Gemfile b/.travis/Gemfile deleted file mode 100644 index 5691697595..0000000000 --- a/.travis/Gemfile +++ /dev/null @@ -1,5 +0,0 @@ -source "https://rubygems.org" - -gem "rake" -gem "batch" -gem "redcarpet" diff --git a/.travis/Gemfile.lock b/.travis/Gemfile.lock deleted file mode 100644 index e0b553c524..0000000000 --- a/.travis/Gemfile.lock +++ /dev/null @@ -1,17 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - batch (0.0.3) - rake (0.9.2.2) - redcarpet (3.3.2) - -PLATFORMS - ruby - -DEPENDENCIES - batch - rake - redcarpet - -BUNDLED WITH - 1.10.5 From 3fb38eb023a24a4c01b5fd55a2c98d6690931a6f Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 10 Jul 2015 23:08:53 -0300 Subject: [PATCH 0349/2314] More silent Travis output. 
--- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index bd18461522..a905458e99 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,4 +12,4 @@ rvm: install: - gem install $(sed -e 's/ -v /:/' .gems) -script: make +script: make -s From 8213512024078512ffc179c9b0ff2a0e14d9bfb6 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 3 May 2015 10:42:03 +0300 Subject: [PATCH 0350/2314] Added missing glob-style patterns Specifically the character set's complement and range. A separate pattern-matching.md topic that's linked from `KEYS` and `[HSZ]?SCAN` doc pages seems to be a good idea <- thoughts? --- commands/keys.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/keys.md b/commands/keys.md index 9263ea2724..670bce96ac 100644 --- a/commands/keys.md +++ b/commands/keys.md @@ -21,6 +21,8 @@ Supported glob-style patterns: * `h?llo` matches `hello`, `hallo` and `hxllo` * `h*llo` matches `hllo` and `heeeello` * `h[ae]llo` matches `hello` and `hallo,` but not `hillo` +* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` +* `h[a-b]llo` matches `hallo` and `hbllo` Use `\` to escape special characters if you want to match them verbatim. From 8d5ac1d1ccfd1b43319474103db17c075640816f Mon Sep 17 00:00:00 2001 From: brettmclean Date: Fri, 24 Apr 2015 17:07:22 -0600 Subject: [PATCH 0351/2314] Edits for grammar and clarity --- topics/cluster-spec.md | 611 +++++++++++++++++++++-------------------- 1 file changed, 306 insertions(+), 305 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index ab569141b5..3bce2611a6 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -16,33 +16,33 @@ Redis Cluster is a distributed implementation of Redis with the following goals, * High performance and linear scalability up to 1000 nodes. There are no proxies, asynchronous replication is used, and no merge operations are performed on values. 
* Acceptable degree of write safety: the system tries (in a best-effort way) to retain all the writes originating from clients connected with the majority of the master nodes. Usually there are small windows where acknowledged writes can be lost. Windows to lose acknowledged writes are larger when clients are in a minority partition. -* Availability: Redis Cluster is able to survive to partitions where the majority of the master nodes are reachable and there is at least a reachable slave for every master node that is no longer reachable. Moreover using *replicas migration*, masters no longer replicated by any slave, will receive one from a master which is covered by multiple slaves. +* Availability: Redis Cluster is able to survive partitions where the majority of the master nodes are reachable and there is at least a reachable slave for every master node that is no longer reachable. Moreover using *replicas migration*, masters no longer replicated by any slave will receive one from a master which is covered by multiple slaves. What is described in this document is implemented in Redis 3.0 or greater. Implemented subset --- -Redis Cluster implements all the single keys commands available in the -non distributed version of Redis. Commands performing complex multi key +Redis Cluster implements all the single key commands available in the +non-distributed version of Redis. Commands performing complex multi-key operations like Set type unions or intersections are implemented as well as long as the keys all belong to the same node. Redis Cluster implements a concept called **hash tags** that can be used in order to force certain keys to be stored in the same node. However during manual reshardings, multi-key operations may become unavailable for some time -while single keys operations are always available. +while single key operations are always available. 
Redis Cluster does not support multiple databases like the stand alone version -of Redis, there is just database 0, and the `SELECT` command is not allowed. +of Redis. There is just database 0 and the `SELECT` command is not allowed. -Clients and Servers roles in the Redis cluster protocol +Clients and Servers roles in the Redis Cluster protocol --- -In Redis cluster nodes are responsible for holding the data, +In Redis Cluster nodes are responsible for holding the data, and taking the state of the cluster, including mapping keys to the right nodes. -Cluster nodes are also able to auto-discover other nodes, detect non working -nodes, and performing slave nodes promotion to master when needed, in order +Cluster nodes are also able to auto-discover other nodes, detect non-working +nodes, and promote slave nodes to master when needed in order to continue to operate when a failure occurs. To perform their tasks all the cluster nodes are connected using a @@ -52,7 +52,7 @@ bus. Nodes use a gossip protocol to propagate information about the cluster in order to discover new nodes, to send ping packets to make sure all the other nodes are working properly, and to send cluster messages needed to signal specific conditions. The cluster bus is also used in order to -propagate Pub/Sub messages across the cluster, and to orchestrate manual +propagate Pub/Sub messages across the cluster and to orchestrate manual failovers when requested by users (manual failovers are failovers which are not initiated by the Redis Cluster failure detector, but by the system administrator directly). @@ -67,71 +67,71 @@ keys and nodes can improve the performance in a sensible way. Write safety --- -Redis Cluster uses asynchronous replication between nodes, and **last failover wins** implicit merge function. It means that the last elected master dataset eventually replaces all the other replicas. This means that there are always windows when it is possible to lose writes during partitions. 
However these windows are very different in the case of a client that is connected to the majority of masters, and a client that is connected to the minority of masters. +Redis Cluster uses asynchronous replication between nodes, and **last failover wins** implicit merge function. This means that the last elected master dataset eventually replaces all the other replicas. There is always a window of time when it is possible to lose writes during partitions. However these windows are very different in the case of a client that is connected to the majority of masters, and a client that is connected to the minority of masters. Redis Cluster tries harder to retain writes that are performed by clients connected to the majority of masters, compared to writes performed into the minority -side. The following are examples of scenarios that lead to lose of acknowledged -writes received in the majority partitions, during failures: +side. The following are examples of scenarios that lead to loss of acknowledged +writes received in the majority partitions during failures: -1. A write may reach a master, but while the master may be able to reply to the client, the write may not be propagated to slaves via the asynchronous replication used between master and slave nodes. If the master dies without the write reaching the slaves, the write is lost forever in case the master is unreachable for a long enough period that one of its slaves is promoted. This is usually hard to observe in case of total, sudden failure of a master node, since masters try to reply to clients (with the acknowledge of the write) and slaves (propagating the write) about at the same time. However it is a real world failure mode. +1. A write may reach a master, but while the master may be able to reply to the client, the write may not be propagated to slaves via the asynchronous replication used between master and slave nodes. 
If the master dies without the write reaching the slaves, the write is lost forever if the master is unreachable for a long enough period that one of its slaves is promoted. This is usually hard to observe in the case of a total, sudden failure of a master node since masters try to reply to clients (with the acknowledge of the write) and slaves (propagating the write) at about the same time. However it is a real world failure mode. 2. Another theoretically possible failure mode where writes are lost is the following: * A master is unreachable because of a partition. * It gets failed over by one of its slaves. * After some time it may be reachable again. -* A client with a not updated routing table may write to the old master before it is converted into a slave (of the new master) by the cluster. +* A client with an out-of-date routing table may write to the old master before it is converted into a slave (of the new master) by the cluster. -The second failure mode is unlikely to happen because master nodes not able to communicate with the majority of the other masters for enough time to be failed over, no longer accept writes, and when the partition is fixed writes are still refused for a small amount of time to allow other nodes to inform about configuration changes. This failure mode also requires that the client has a yet not updated table. +The second failure mode is unlikely to happen because master nodes unable to communicate with the majority of the other masters for enough time to be failed over will no longer accept writes, and when the partition is fixed writes are still refused for a small amount of time to allow other nodes to inform about configuration changes. This failure mode also requires that the client's routing table has not yet been updated. -Writes targeting the minority side of a partition has fairly larger windows to get lost. 
For example Redis Cluster loses a non trivial amount of writes on partitions where there is a minority of masters and at least one or more clients, since all the writes sent to the masters may potentially get lost if the masters are failed over in the majority side. +Writes targeting the minority side of a partition have a larger window in which to get lost. For example, Redis Cluster loses a non-trivial number of writes on partitions where there is a minority of masters and at least one or more clients, since all the writes sent to the masters may potentially get lost if the masters are failed over in the majority side. -Specifically, for a master to be failed over, it must be not reachable by the majority of masters for at least `NODE_TIMEOUT`, so if the partition is fixed before that time, no write is lost. When the partition lasts for more than `NODE_TIMEOUT`, all the writes performed in the minority side up to that point may be lost. However the minority side of a Redis Cluster will start refusing writes as soon as `NODE_TIMEOUT` time has elapsed without contact with the majority, so there is a maximum window after which the minority becomes no longer available, hence no write is accepted and lost after that time. +Specifically, for a master to be failed over it must be unreachable by the majority of masters for at least `NODE_TIMEOUT`, so if the partition is fixed before that time, no writes are lost. When the partition lasts for more than `NODE_TIMEOUT`, all the writes performed in the minority side up to that point may be lost. However the minority side of a Redis Cluster will start refusing writes as soon as `NODE_TIMEOUT` time has elapsed without contact with the majority, so there is a maximum window after which the minority becomes no longer available. Hence, no writes are accepted or lost after that time. Availability --- -Redis Cluster is not available in the minority side of the partition. 
In the majority side of the partition assuming that there are at least the majority of masters and a slave for every unreachable master, the cluster returns available after `NODE_TIMEOUT` time, plus a few more seconds required for a slave to get elected and failover its master (failovers usually are executed in a matter of 1 or 2 seconds). +Redis Cluster is not available in the minority side of the partition. In the majority side of the partition assuming that there are at least the majority of masters and a slave for every unreachable master, the cluster becomes available again after `NODE_TIMEOUT` time plus a few more seconds required for a slave to get elected and failover its master (failovers are usually executed in a matter of 1 or 2 seconds). -This means that Redis Cluster is designed to survive to failures of a few nodes in the cluster, but is not a suitable solution for applications that require availability in the event of large net splits. +This means that Redis Cluster is designed to survive failures of a few nodes in the cluster, but it is not a suitable solution for applications that require availability in the event of large net splits. In the example of a cluster composed of N master nodes where every node has a single slave, the majority side of the cluster will remain available as long as a single node is partitioned away, and will remain available with a probability of `1-(1/(N*2-1))` when two nodes are partitioned away (after the first node fails we are left with `N*2-1` nodes in total, and the probability of the only master without a replica to fail is `1/(N*2-1))`. -For example in a cluster with 5 nodes and a single slave per node, there is a `1/(5*2-1) = 0.1111` probabilities that after two nodes are partitioned away from the majority, the cluster will no longer be available, that is about 11% of probabilities. 
+For example, in a cluster with 5 nodes and a single slave per node, there is a `1/(5*2-1) = 11.11%` probability that after two nodes are partitioned away from the majority, the cluster will no longer be available. Thanks to a Redis Cluster feature called **replicas migration** the Cluster availability is improved in many real world scenarios by the fact that replicas migrate to orphaned masters (masters no longer having replicas). So at every successful failure event, the cluster may reconfigure the slaves -layout in order to resist better to the next failure. +layout in order to better resist the next failure. Performance --- In Redis Cluster nodes don't proxy commands to the right node in charge for a given key, but instead they redirect clients to the right nodes serving a given portion of the key space. -Eventually clients obtain an up to date representation of the cluster and which node serves which subset of keys, so during normal operations clients directly contact the right nodes in order to send a given command. +Eventually clients obtain an up-to-date representation of the cluster and which node serves which subset of keys, so during normal operations clients directly contact the right nodes in order to send a given command. -Because of the use of asynchronous replication, nodes do not wait for other nodes acknowledgment of writes (if not explicitly requested using the `WAIT` command). +Because of the use of asynchronous replication, nodes do not wait for other nodes' acknowledgment of writes (if not explicitly requested using the `WAIT` command). -Also, because multiple keys commands are only limited to *near* keys, data is never moved between nodes if not in case of resharding. +Also, because multiple-key commands are only limited to *near* keys, data is never moved between nodes except when resharding. -So normal operations are handled exactly as in the case of a single Redis instance. 
This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design allows to scale linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single standalone Redis node case. +Normal operations are handled exactly as in the case of a single Redis instance. This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design scales linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single standalone Redis node case. -Very high performances and scalability while preserving weak but +Very high performance and scalability while preserving weak but reasonable forms of data safety and availability is the main goal of Redis Cluster. Why merge operations are avoided --- -Redis Cluster design avoids conflicting versions of the same key-value pair in multiple nodes as in the case of the Redis data model this is not always desirable: values in Redis are often very large, it is common to see lists or sorted sets with millions of elements. Also data types are semantically complex. Transferring and merging these kind of values can be a major bottleneck and/or may require a non trivial involvement of application-side logic, additional memory to store meta-data, and so forth. +Redis Cluster design avoids conflicting versions of the same key-value pair in multiple nodes as in the case of the Redis data model this is not always desirable. Values in Redis are often very large; it is common to see lists or sorted sets with millions of elements. Also data types are semantically complex. 
Transferring and merging these kind of values can be a major bottleneck and/or may require the non-trivial involvement of application-side logic, additional memory to store meta-data, and so forth. -There are no strict technological limits here, CRDTs or synchronously replicated -state machines can model complex data types similar to Redis, however the +There are no strict technological limits here. CRDTs or synchronously replicated +state machines can model complex data types similar to Redis. However, the actual run time behavior of such systems would not be similar to Redis Cluster. Redis Cluster was designed in order to cover the exact use cases of the -non clustered Redis version. +non-clustered Redis version. Overview of Redis Cluster main components === @@ -143,13 +143,14 @@ The key space is split into 16384 slots, effectively setting an upper limit for the cluster size of 16384 master nodes (however the suggested max size of nodes is in the order of ~ 1000 nodes). -Each master nodes in a cluster handles a subset of the 16384 hash slots. -When the cluster is **stable**, that means there is no cluster -reconfiguration in progress (where hash slots are moved from one node -to another), a single hash slot will be served exactly by a single node -(however the serving node can have one or more slaves that will replace -it in the case of net splits or failures, and that can be used in order -to scale read operations where reading stale data is acceptable). +Each master node in a cluster handles a subset of the 16384 hash slots. +The cluster is **stable** when there is no cluster reconfiguration in +progress (i.e. where hash slots are being moved from one node +to another). When the cluster is stable, a single hash slot will be +served by a single node (however the serving node can have one or more +slaves that will replace it in the case of net splits or failures, +and that can be used in order to scale read operations where +reading stale data is acceptable). 
The base algorithm used to map keys to hash slots is the following (read the next paragraph for the hash tag exception to this rule): @@ -167,10 +168,10 @@ The CRC16 is specified as follows: * Xor constant to output CRC: 0000 * Output for "123456789": 31C3 -14 out of 16 bit of the output of CRC16 are used (this is why there is +14 out of 16 CRC16 output bits are used (this is why there is a modulo 16384 operation in the formula above). -In our tests CRC16 behaved remarkably well in distributing different kind of +In our tests CRC16 behaved remarkably well in distributing different kinds of keys evenly across the 16384 slots. **Note**: A reference implementation of the CRC16 algorithm used is available in the Appendix A of this document. @@ -185,13 +186,13 @@ multi-key operations in Redis Cluster. In order to implement hash tags, the hash slot for a key is computed in a slightly different way in certain conditions. -Basically if the key contains a "{...}" pattern only the substring between +If the key contains a "{...}" pattern only the substring between `{` and `}` is hashed in order to obtain the hash slot. However since it is possible that there are multiple occurrences of `{` or `}` the algorithm is well specified by the following rules: * IF the key contains a `{` character. -* AND IF there is a `}` character on the right of `{` +* AND IF there is a `}` character to the right of `{` * AND IF there are one or more characters between the first occurrence of `{` and the first occurrence of `}`. Then instead of hashing the key, only what is between the first occurrence of `{` and the following first occurrence of `}` is hashed. @@ -255,7 +256,7 @@ deleted by the system administrator, or a *hard reset* is requested via the `CLUSTER RESET` command. The node ID is used to identify every node across the whole cluster. 
-It is possible for a given node to change IP and address without any need +It is possible for a given node to change its IP address without any need to also change the node ID. The cluster is also able to detect the change in IP/port and reconfigure using the gossip protocol running over the cluster bus. @@ -267,19 +268,18 @@ cluster configuration detail of this specific node, and is eventually consistent across the cluster. Some other information, like the last time a node was pinged, is instead local to each node. -This is a list of information each node has associated in each other node -that knows it: The node ID, IP and port of the node, a set of flags, what is -the master of the node if it is flagged as `slave`, last time the node -was pinged and the last time the pong was received, the current *configuration -epoch* of the node (explained later in this specification), the link state -and finally the set of hash slots served. +Every node maintains the following information about other nodes that it is +aware of in the cluster: The node ID, IP and port of the node, a set of +flags, what is the master of the node if it is flagged as `slave`, last time +the node was pinged and the last time the pong was received, the current +*configuration epoch* of the node (explained later in this specification), +the link state and finally the set of hash slots served. A detailed [explanation of all the node fields](http://redis.io/commands/cluster-nodes) is described in the `CLUSTER NODES` documentation. -The `CLUSTER NODES` command, that can be sent to each of the nodes in the cluster, provides as output the state of the cluster and the information for each node -according to the local view the queried node has of the cluster. +The `CLUSTER NODES` command can be sent to any node in the cluster and provides the state of the cluster and the information for each node according to the local view the queried node has of the cluster. 
-The following is an example of output of `CLUSTER NODES` sent to a master +The following is sample output of the `CLUSTER NODES` command sent to a master node in a small cluster of three nodes. $ redis-cli cluster nodes @@ -292,18 +292,18 @@ In the above listing the different fields are in order: node id, address:port, f The Cluster bus --- -Every Redis Cluster node has an additional TCP port in order to receive +Every Redis Cluster node has an additional TCP port for receiving incoming connections from other Redis Cluster nodes. This port is at a fixed -offset compared to the normal TCP port used to receive incoming connections +offset from the normal TCP port used to receive incoming connections from clients. To obtain the Redis Cluster port, 10000 should be added to -the normal commands port, so for example if a Redis node is listening for -client connections to port 6379, the Cluster bus port 16379 will also be +the normal commands port. For example, if a Redis node is listening for +client connections on port 6379, the Cluster bus port 16379 will also be opened. -Node to Node communication happens exclusively using the Cluster bus and -the Cluster bus protocol, which is a binary protocol composed of frames +Node-to-node communication happens exclusively using the Cluster bus and +the Cluster bus protocol: a binary protocol composed of frames of different types and sizes. The Cluster bus binary protocol is not -publicly documented since it is not indented for external software devices +publicly documented since it is not intended for external software devices to talk with Redis Cluster nodes using this protocol. However you can obtain more details about the Cluster bus protocol by reading the `cluster.h` and `cluster.c` files in the Redis Cluster source code. 
@@ -311,7 +311,7 @@ obtain more details about the Cluster bus protocol by reading the Cluster topology --- -Redis cluster is a full mesh where every node is connected with every other node using a TCP connection. +Redis Cluster is a full mesh where every node is connected with every other node using a TCP connection. In a cluster of N nodes, every node has N-1 outgoing TCP connections, and N-1 incoming connections. @@ -321,20 +321,20 @@ refresh the connection with the node by reconnecting from scratch. While Redis Cluster nodes form a full mesh, **nodes use a gossip protocol and a configuration update mechanism in order to avoid exchanging too many -messages between nodes during normal conditions**, so the number of message +messages between nodes during normal conditions**, so the number of messages exchanged is not exponential. Nodes handshake --- -Nodes always accept connection in the cluster bus port, and even reply to +Nodes always accept connections on the cluster bus port, and even reply to pings when received, even if the pinging node is not trusted. -However all the other packets will be discarded by the receiving node if the +However, all other packets will be discarded by the receiving node if the sending node is not considered part of the cluster. A node will accept another node as part of the cluster only in two ways: -* If a node will present itself with a `MEET` message. A meet message is exactly +* If a node presents itself with a `MEET` message. A meet message is exactly like a `PING` message, but forces the receiver to accept the node as part of the cluster. Nodes will send `MEET` messages to other nodes **only if** the system administrator requests this via the following command: @@ -342,9 +342,9 @@ the cluster. Nodes will send `MEET` messages to other nodes **only if** the syst * A node will also register another node as part of the cluster if a node that is already trusted will gossip about this other node. 
So if A knows B, and B knows C, eventually B will send gossip messages to A about C. When this happens, A will register C as part of the network, and will try to connect with C. -This means that as long as we join nodes in any connected graph, they'll eventually form a fully connected graph automatically. This means that basically the cluster is able to auto-discover other nodes, but only if there is a trusted relationship that was forced by the system administrator. +This means that as long as we join nodes in any connected graph, they'll eventually form a fully connected graph automatically. This means that the cluster is able to auto-discover other nodes, but only if there is a trusted relationship that was forced by the system administrator. -This mechanism makes the cluster more robust but prevents that different Redis clusters will accidentally mix after change of IP addresses or other network related events. +This mechanism makes the cluster more robust but prevents different Redis clusters from accidentally mixing after change of IP addresses or other network related events. Redirection and resharding === @@ -366,32 +366,32 @@ to the client with a MOVED error, like in the following example: -MOVED 3999 127.0.0.1:6381 The error includes the hash slot of the key (3999) and the ip:port of the -instance that can serve the query. The client need to reissue the query -to the specified node, specified by IP address and port. +instance that can serve the query. The client needs to reissue the query +to the specified node's IP address and port. Note that even if the client waits a long time before reissuing the query, and in the meantime the cluster configuration changed, the destination node will reply again with a MOVED error if the hash slot 3999 is now served by another node. The same happens if the contacted node had no updated information. 
So while from the point of view of the cluster nodes are identified by -IDs we try to simply our interface with the client just exposing a map +IDs we try to simplify our interface with the client just exposing a map between hash slots and Redis nodes identified by IP:port pairs. The client is not required to, but should try to memorize that hash slot 3999 is served by 127.0.0.1:6381. This way once a new command needs to -be issued it can compute the hash slot of the target key and pick the -right node with higher chances. +be issued it can compute the hash slot of the target key and have a +greater chance of choosing the right node. An alternative is to just refresh the whole client-side cluster layout -when a MOVED redirection is received, using the `CLUSTER NODES` or -`CLUSTER SLOTS` commands, since when a redirection is encountered, likely -multiple slots were reconfigured, not just one, so to update the configuration -as soon as possible for the client is often the best strategy. +using the `CLUSTER NODES` or `CLUSTER SLOTS` commands +when a MOVED redirection is received. When a redirection is encountered, it +is likely multiple slots were reconfigured rather than just one, so updating +the client configuration as soon as possible is often the best strategy. Note that when the Cluster is stable (no ongoing changes in the configuration), eventually all the clients will obtain a map of hash slots -> nodes, making the cluster efficient, with clients directly addressing the right nodes -without redirections nor proxies or other single point of failure entities. +without redirections, proxies or other single point of failure entities. A client **must be also able to handle -ASK redirections** that are described later in this document, otherwise it is not a complete Redis Cluster client. @@ -399,24 +399,24 @@ later in this document, otherwise it is not a complete Redis Cluster client. 
Cluster live reconfiguration --- -Redis cluster supports the ability to add and remove nodes while the cluster -is running. Actually adding or removing a node is abstracted into the same -operation, that is, moving a hash slot from a node to another. This means +Redis Cluster supports the ability to add and remove nodes while the cluster +is running. Adding or removing a node is abstracted into the same +operation: moving a hash slot from one node to another. This means that the same basic mechanism can be used in order to rebalance the cluster, add or remove nodes, and so forth. -* To add a new node to the cluster an empty node is added to the cluster and some hash slot is moved from existing nodes to the new node. +* To add a new node to the cluster an empty node is added to the cluster and some set of hash slots are moved from existing nodes to the new node. * To remove a node from the cluster the hash slots assigned to that node are moved to other existing nodes. * To rebalance the cluster a given set of hash slots are moved between nodes. -So the core of the implementation is the ability to move hash slots around. -Actually from a practical point of view a hash slot is just a set of keys, so -what Redis cluster really does during *resharding* is to move keys from -an instance to another instance. Moving an hash slot means moving all the keys +The core of the implementation is the ability to move hash slots around. +From a practical point of view a hash slot is just a set of keys, so +what Redis Cluster really does during *resharding* is to move keys from +an instance to another instance. Moving a hash slot means moving all the keys that happen to hash into this hash slot. To understand how this works we need to show the `CLUSTER` subcommands -that are used to manipulate the slots translation table in a Redis cluster node. +that are used to manipulate the slots translation table in a Redis Cluster node. 
The following subcommands are available (among others not useful in this case): @@ -431,7 +431,7 @@ The first two commands, `ADDSLOTS` and `DELSLOTS`, are simply used to assign master node that it will be in charge of storing and serving content for the specified hash slot. -After the hash slots are assigned they will propagate across all the cluster +After the hash slots are assigned they will propagate across the cluster using the gossip protocol, as specified later in the *configuration propagation* section. @@ -445,19 +445,19 @@ or for debugging tasks: in practice it is rarely used. The `SETSLOT` subcommand is used to assign a slot to a specific node ID if the `SETSLOT NODE` form is used. Otherwise the slot can be set in the two special states `MIGRATING` and `IMPORTING`. Those two special states -are used in order to migrate an hash slot from one node to another. +are used in order to migrate a hash slot from one node to another. -* When a slot is set as MIGRATING, the node will accept all the requests -for queries that are about this hash slot, but only if the key in question +* When a slot is set as MIGRATING, the node will accept all queries that +are about this hash slot, but only if the key in question exists, otherwise the query is forwarded using a `-ASK` redirection to the node that is target of the migration. -* When a slot is set as IMPORTING, the node will accept all the requests -for queries that are about this hash slot, but only if the request is -preceded by an `ASKING` command. Otherwise if not `ASKING` command was given +* When a slot is set as IMPORTING, the node will accept all queries that +are about this hash slot, but only if the request is +preceded by an `ASKING` command. If the `ASKING` command was not given by the client, the query is redirected to the real hash slot owner via -a `-MOVED` redirection error, like would happen normally. +a `-MOVED` redirection error, as would happen normally. 
-Let's make this more clear with an example of hash slot migration. +Let's make this clearer with an example of hash slot migration. Assume that we have two Redis master nodes, called A and B. We want to move hash slot 8 from A to B, so we issue commands like this: @@ -468,13 +468,13 @@ All the other nodes will continue to point clients to node "A" every time they are queried with a key that belongs to hash slot 8, so what happens is that: -* All the queries about already existing keys are processed by "A". -* All the queries about non existing keys in A are processed by "B", because "A" will redirect clients to "B". +* All queries about existing keys are processed by "A". +* All queries about non-existing keys in A are processed by "B", because "A" will redirect clients to "B". This way we no longer create new keys in "A". -In the meantime, a special program used during reshardings, that is -called `redis-trib`, and is the default Redis cluster configuration utility, -will make sure to migrate existing keys in hash slot 8 from A to B. +In the meantime, a special program called `redis-trib` used during reshardings +and Redis Cluster configuration will migrate existing keys in +hash slot 8 from A to B. This is performed using the following command: CLUSTER GETKEYSINSLOT slot count @@ -489,37 +489,37 @@ there are no race conditions). This is how `MIGRATE` works: `MIGRATE` will connect to the target instance, send a serialized version of the key, and once an OK code is received will delete the old key from its own -dataset. So from the point of view of an external client a key either exists -in A or B in a given time. +dataset. From the point of view of an external client a key exists either +in A or B at any given time. -In Redis cluster there is no need to specify a database other than 0, but -`MIGRATE` can be used for other tasks as well not involving Redis cluster so -it is a general enough command. 
+In Redis Cluster there is no need to specify a database other than 0, but
+`MIGRATE` is a general command that can be used for other tasks not
+involving Redis Cluster.

`MIGRATE` is optimized to be as fast as possible even when moving complex
-keys such as long lists, but of course in Redis Cluster reconfiguring the
+keys such as long lists, but in Redis Cluster reconfiguring the
cluster where big keys are present is not considered a wise procedure if
there are latency constraints in the application using the database.

-When finally the migration process is finished, the `SETSLOT <slot> NODE <node-id>` command is send to the two nodes involved in the migration in order to
-set the slots in normal state again. Moreover the same command is usually
-send to all the other instances in order not to wait for the natural
+When the migration process is finally finished, the `SETSLOT <slot> NODE <node-id>` command is sent to the two nodes involved in the migration in order to
+set the slots to their normal state again. The same command is usually
+sent to all other nodes to avoid waiting for the natural
propagation of the new configuration across the cluster.

ASK redirection
---

-In the previous section we briefly talked about ASK redirection, why can't
-we simply use the MOVED redirection? Because while MOVED means that
+In the previous section we briefly talked about ASK redirection. Why can't
+we simply use MOVED redirection? Because while MOVED means that
we think the hash slot is permanently served by a different node and the
next queries should be tried against the specified node, ASK means to
-only ask the next query to the specified node.
+send only the next query to the specified node.

-This is needed because the next query about hash slot 8 can be about the
-key that is still in A, so we always want that the client will try A and
+This is needed because the next query about hash slot 8 can be about a
+key that is still in A, so we always want the client to try A and
then B if needed.
Since this happens only for one hash slot out of 16384 -available the performance hit on the cluster is acceptable. +available, the performance hit on the cluster is acceptable. -However we need to force that client behavior, so in order to make sure +We need to force that client behavior, so to make sure that clients will only try node B after A was tried, node B will only accept queries of a slot that is set as IMPORTING if the client sends the ASKING command before sending the query. @@ -527,24 +527,24 @@ ASKING command before sending the query. Basically the ASKING command sets a one-time flag on the client that forces a node to serve a query about an IMPORTING slot. -So the full semantics of the ASK redirection is the following, from the -point of view of the client. +The full semantics of ASK redirection from the +point of view of the client is as follows: -* If ASK redirection is received send only the query that was redirected to the specified node, but continue sending the next queries to the old node. +* If ASK redirection is received, send only the query that was redirected to the specified node but continue sending subsequent queries to the old node. * Start the redirected query with the ASKING command. -* Don't update local client tables to map hash slot 8 to B for now. +* Don't yet update local client tables to map hash slot 8 to B. -Once the hash slot 8 migration is completed, A will send a MOVED message and +Once hash slot 8 migration is completed, A will send a MOVED message and the client may permanently map hash slot 8 to the new IP and port pair. -Note that however if a buggy client will perform the map earlier this is not +Note that if a buggy client performs the map earlier this is not a problem since it will not send the ASKING command before issuing the query, so B will redirect the client to A using a MOVED redirection error. 
-Slots migration is explained in similar terms but with a different wording, -for the sake of redundancy in the documentation, in the `CLUSTER SETSLOT` +Slots migration is explained in similar terms but with different wording +(for the sake of redundancy in the documentation) in the `CLUSTER SETSLOT` command documentation. -Clients first connection and handling of redirections. +Clients first connection and handling of redirections --- While it is possible to have a Redis Cluster client implementation that does not @@ -553,22 +553,22 @@ nodes serving it) in memory and only works by contacting random nodes waiting to be redirected, such a client would be very inefficient. Redis Cluster clients should try to be smart enough to memorize the slots -configuration. However this configuration is not *required* to be up to date, -since contacting the wrong node will simply result in a redirection, that will -trigger an update of the client view. +configuration. However this configuration is not *required* to be up to date. +Since contacting the wrong node will simply result in a redirection, that +should trigger an update of the client view. Clients usually need to fetch a complete list of slots and mapped node -addresses at two different moments: +addresses in two different situations: * At startup in order to populate the initial slots configuration. * When a `MOVED` redirection is received. -Note that a client may handle the `MOVED` redirection updating just the moved -slot in its table, however this is usually not efficient since often the -configuration of multiple slots is modified at once (for example if a slave -is promoted to master, all the slots served by the old master will be remapped). -It is much simpler to react to a `MOVED` redirection fetching the full map -of slots to nodes from scratch. 
+Note that a client may handle the `MOVED` redirection by updating just the +moved slot in its table, however this is usually not efficient since often +the configuration of multiple slots is modified at once (for example if a +slave is promoted to master, all the slots served by the old master will +be remapped). It is much simpler to react to a `MOVED` redirection by +fetching the full map of slots to nodes from scratch. In order to retrieve the slots configuration Redis Cluster offers an alternative to the `CLUSTER NODES` command that does not @@ -605,43 +605,43 @@ The first two sub-elements of every element of the returned array are the start-end slots of the range. The additional elements represent address-port pairs. The first address-port pair is the master serving the slot, and the additional address-port pairs are all the slaves serving the same slot -that are not in an error condition (the FAIL flag is not set). +that are not in an error condition (i.e. the FAIL flag is not set). For example the first element of the output says that slots from 5461 to 10922 (start and end included) are served by 127.0.0.1:7001, and it is possible to scale read-only load contacting the slave at 127.0.0.1:7004. -`CLUSTER SLOTS` does not guarantee to return ranges that will cover all the +`CLUSTER SLOTS` is not guaranteed to return ranges that cover the full 16384 slots if the cluster is misconfigured, so clients should initialize the slots configuration map filling the target nodes with NULL objects, and -report an error if the user will try to execute commands about keys +report an error if the user tries to execute commands about keys that belong to unassigned slots. -However before returning an error to the caller, when a slot is found to +Before returning an error to the caller when a slot is found to be unassigned, the client should try to fetch the slots configuration again to check if the cluster is now configured properly. 
Multiple keys operations --- -Using hash tags clients are free to use multiple-keys operations. +Using hash tags, clients are free to use multiple-key operations. For example the following operation is valid: MSET {user:1000}.name Angela {user:1000}.surname White -However multi-key operations may become unavailable when a resharding of the +Multi-key operations may become unavailable when a resharding of the hash slot the keys belong to is in progress. -More specifically, even during a resharding, the multi-key operations -targeting keys that all exist and are still all in the same node (either +More specifically, even during a resharding the multi-key operations +targeting keys that all exist and are all still in the same node (either the source or destination node) are still available. -Operations about keys that don't exist or are, during the resharding, split +Operations on keys that don't exist or are - during the resharding - split between the source and destination nodes, will generate a `-TRYAGAIN` error. The client can try the operation after some time, or report back the error. -As soon as the migration of the specified hash slot has terminated, all the -multi key operations are available again for this hash slot. +As soon as migration of the specified hash slot has terminated, all +multi-key operations are available again for that hash slot. Scaling reads using slave nodes --- @@ -650,11 +650,11 @@ Normally slave nodes will redirect clients to the authoritative master for the hash slot involved in a given command, however clients can use slaves in order to scale reads using the `READONLY` command. -`READONLY` tells a Redis cluster slave node that the client is ok reading +`READONLY` tells a Redis Cluster slave node that the client is ok reading possibly stale data and is not interested in running write queries. 
When the connection is in readonly mode, the cluster will send a redirection -to the client only in the context of an operation involving keys not served +to the client only if the operation involves keys not served by the slave's master node. This may happen because: 1. The client sent a command about hash slots never served by the master of this slave. @@ -668,7 +668,7 @@ The readonly state of the connection can be cleared using the `READWRITE` comman Fault Tolerance === -Nodes heartbeat and gossip messages +Heartbeat and gossip messages --- Redis Cluster nodes continuously exchange ping and pong packets. Those two kind of packets have the same structure, and both carry important configuration information. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. @@ -677,34 +677,35 @@ Usually nodes send ping packets that will trigger the receivers to reply with po Usually a node will ping a few random nodes every second so that the total number of ping packets sent (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. -However every node makes sure to ping every other node that we don't either sent a ping or received a pong for longer than half the `NODE_TIMEOUT` time. Before `NODE_TIMEOUT` has elapsed, nodes also try to reconnect the TCP link with another node to make sure nodes are not believed to be unreachable only because there is a problem in the current TCP connection. +However every node makes sure to ping every other node that hasn't sent a ping or received a pong for longer than half the `NODE_TIMEOUT` time. Before `NODE_TIMEOUT` has elapsed, nodes also try to reconnect the TCP link with another node to make sure nodes are not believed to be unreachable only because there is a problem in the current TCP connection. 
-The amount of messages globally exchanged can be sizable if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which we don't have fresh information for half the `NODE_TIMEOUT` time. +The number of messages globally exchanged can be sizable if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which they don't have fresh information every half the `NODE_TIMEOUT` time. -For example in a 100 nodes cluster with a node timeout set to 60 seconds, every node will try to send 99 pings every 30 seconds, with a total amount of pings of 3.3 per second, that multiplied for 100 nodes is 330 pings per second in the total cluster. +For example in a 100 node cluster with a node timeout set to 60 seconds, every node will try to send 99 pings every 30 seconds, with a total amount of pings of 3.3 per second. Multiplied by 100 nodes, this is 330 pings per second in the total cluster. -There are ways to lower the number of messages, however no issue currently was -even reported with the bandwidth used by Redis Cluster failure detection, so -for now the obvious and direct design is used. Note that even in the above -example, the 330 packets per seconds exchanged are evenly divided among 100 -different nodes, so the traffic each node receives is acceptable. +There are ways to lower the number of messages, however there have been no +reported issues with the bandwidth currently used by Redis Cluster failure +detection, so for now the obvious and direct design is used. Note that even +in the above example, the 330 packets per second exchanged are evenly +divided among 100 different nodes, so the traffic each node receives +is acceptable. 
-Heartbeat packets content
+Heartbeat packet content
---

-Ping and pong packets contain a header that is common to all the kind of packets (for instance packets to request a failover vote), and a special Gossip Section that is specific of Ping and Pong packets.
+Ping and pong packets contain a header that is common to all types of packets (for instance packets to request a failover vote), and a special Gossip Section that is specific to Ping and Pong packets.

The common header has the following information:

-* Node ID, that is a 160 bit pseudorandom string that is assigned the first time a node is created and remains the same for all the life of a Redis Cluster node.
-* The `currentEpoch` and `configEpoch` fields of the sending node, that are used in order to mount the distributed algorithms used by Redis Cluster (this is explained in detail in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master.
+* Node ID, a 160 bit pseudorandom string that is assigned the first time a node is created and remains the same for all the life of a Redis Cluster node.
+* The `currentEpoch` and `configEpoch` fields of the sending node that are used to mount the distributed algorithms used by Redis Cluster (this is explained in detail in the next sections). If the node is a slave the `configEpoch` is the last known `configEpoch` of its master.
* The node flags, indicating if the node is a slave, a master, and other single-bit node information.
* A bitmap of the hash slots served by the sending node, or if the node is a slave, a bitmap of the slots served by its master.
-* The sender TCP base port (that is, the port used by Redis to accept client commands, add 10000 to this to obtain the cluster port).
+* The sender TCP base port (that is, the port used by Redis to accept client commands; add 10000 to this to obtain the cluster bus port).
* The state of the cluster from the point of view of the sender (down or ok).
* The master node ID of the sending node, if it is a slave. -Ping and pong packets also contain a gossip section. This section offers to the receiver a view of what the sender node thinks about other nodes in the cluster. The gossip section only contains information about a few random nodes among the set of known nodes of the sender. The amount of nodes mentioned in a gossip section is proportional to the cluster size. +Ping and pong packets also contain a gossip section. This section offers to the receiver a view of what the sender node thinks about other nodes in the cluster. The gossip section only contains information about a few random nodes among the set of nodes known to the sender. The number of nodes mentioned in a gossip section is proportional to the cluster size. For every node added in the gossip section the following fields are reported: @@ -717,23 +718,23 @@ Gossip sections allow receiving nodes to get information about the state of othe Failure detection --- -Redis Cluster failure detection is used to recognize when a master or slave node is no longer reachable by the majority of nodes, and as a result of this event, either promote a slave to the role of master, or when this is not possible, put the cluster in an error state to stop receiving queries from clients. +Redis Cluster failure detection is used to recognize when a master or slave node is no longer reachable by the majority of nodes and then respond by promoting a slave to the role of master. When slave promotion is not possible the cluster is put in an error state to stop receiving queries from clients. -As already mentioned, every node takes a list of flags associated with other known nodes. There are two flags that are used for failure detection that are called `PFAIL` and `FAIL`. `PFAIL` means *Possible failure*, and is a non acknowledged failure type. `FAIL` means that a node is failing and that this condition was confirmed by a majority of masters within a fixed amount of time. 
+As already mentioned, every node takes a list of flags associated with other known nodes. There are two flags that are used for failure detection that are called `PFAIL` and `FAIL`. `PFAIL` means *Possible failure*, and is a non-acknowledged failure type. `FAIL` means that a node is failing and that this condition was confirmed by a majority of masters within a fixed amount of time. **PFAIL flag:** A node flags another node with the `PFAIL` flag when the node is not reachable for more than `NODE_TIMEOUT` time. Both master and slave nodes can flag another node as `PFAIL`, regardless of its type. -The concept of non reachability for a Redis Cluster node is that we have an **active ping** (a ping that we sent for which we still have to get a reply) pending for more than `NODE_TIMEOUT`, so for this mechanism to work the `NODE_TIMEOUT` must be large compared to the network round trip time. In order to add reliability during normal operations, nodes will try to reconnect with other nodes in the cluster as soon as half of the `NODE_TIMEOUT` has elapsed without a reply to a ping. This mechanism ensures that connections are kept alive so broken connections should usually not result into false failure reports between nodes. +The concept of non-reachability for a Redis Cluster node is that we have an **active ping** (a ping that we sent for which we have yet to get a reply) pending for longer than `NODE_TIMEOUT`. For this mechanism to work the `NODE_TIMEOUT` must be large compared to the network round trip time. In order to add reliability during normal operations, nodes will try to reconnect with other nodes in the cluster as soon as half of the `NODE_TIMEOUT` has elapsed without a reply to a ping. This mechanism ensures that connections are kept alive so broken connections usually won't result in false failure reports between nodes. 
**FAIL flag:** -The `PFAIL` flag alone is just a local information every node has about other nodes, but it is not used in order to act and is not sufficient to trigger a slave promotion. For a node to be really considered down the `PFAIL` condition needs to be escalated to a `FAIL` condition. +The `PFAIL` flag alone is just local information every node has about other nodes, but it is not sufficient to trigger a slave promotion. For a node to be considered down the `PFAIL` condition needs to be escalated to a `FAIL` condition. -As outlined in the node heartbeats section of this document, every node sends gossip messages to every other node including the state of a few random known nodes. So every node eventually receives the set of node flags for every other node. This way every node has a mechanism to signal other nodes about failure conditions they detected. +As outlined in the node heartbeats section of this document, every node sends gossip messages to every other node including the state of a few random known nodes. Every node eventually receives a set of node flags for every other node. This way every node has a mechanism to signal other nodes about failure conditions they have detected. -This mechanism is used in order to escalate a `PFAIL` condition to a `FAIL` condition, when the following set of conditions are met: +A `PFAIL` condition is escalated to a `FAIL` condition when the following set of conditions are met: * Some node, that we'll call A, has another node B flagged as `PFAIL`. * Node A collected, via gossip sections, information about the state of B from the point of view of the majority of masters in the cluster. @@ -746,24 +747,24 @@ If all the above conditions are true, Node A will: The `FAIL` message will force every receiving node to mark the node in `FAIL` state, whether or not it already flagged the node in `PFAIL` state. 
-Note that *the FAIL flag is mostly one way*, that is, a node can go from `PFAIL` to `FAIL`, but for the `FAIL` flag to be cleared there are only two possibilities: +Note that *the FAIL flag is mostly one way*. That is, a node can go from `PFAIL` to `FAIL`, but a `FAIL` flag can only be cleared in the following situations: -* The node is already reachable and it is a slave. In this case the `FAIL` flag can be cleared as slaves are not failed over. -* The node is already reachable and it is a master not serving any slot. In this case the `FAIL` flag can be cleared as masters without slots do not really participate to the cluster, and are waiting to be configured in order to join the cluster. -* The node is already reachable, is a master, but a long time (N times the `NODE_TIMEOUT`) has elapsed without any detectable slave promotion. Better for it to rejoin the cluster and continue in this case. +* The node is already reachable and is a slave. In this case the `FAIL` flag can be cleared as slaves are not failed over. +* The node is already reachable and is a master not serving any slot. In this case the `FAIL` flag can be cleared as masters without slots do not really participate in the cluster and are waiting to be configured in order to join the cluster. +* The node is already reachable and is a master, but a long time (N times the `NODE_TIMEOUT`) has elapsed without any detectable slave promotion. It's better for it to rejoin the cluster and continue in this case. It is useful to note that while the `PFAIL` -> `FAIL` transition uses a form of agreement, the agreement used is weak: -1. Nodes collect views of other nodes during some time, so even if the majority of master nodes need to "agree", actually this is just state that we collected from different nodes at different times and we are not sure, nor we require, that at a given moment the majority of masters agreed. 
However we discard failure reports which are old, so the failure was signaled by the majority of masters within a window of time. +1. Nodes collect views of other nodes over some time period, so even if the majority of master nodes need to "agree", actually this is just state that we collected from different nodes at different times and we are not sure, nor do we require, that at a given moment the majority of masters agreed. However we discard failure reports which are old, so the failure was signaled by the majority of masters within a window of time. 2. While every node detecting the `FAIL` condition will force that condition on other nodes in the cluster using the `FAIL` message, there is no way to ensure the message will reach all the nodes. For instance a node may detect the `FAIL` condition and because of a partition will not be able to reach any other node. -However the Redis Cluster failure detection has a liveness requirement: eventually all the nodes should agree about the state of a given node. There are two cases that can originate from split brain conditions, either some minority of nodes believe the node is in `FAIL` state, or a minority of nodes believe the node is not in `FAIL` state. In both the cases eventually the cluster will have a single view of the state of a given node: +However the Redis Cluster failure detection has a liveness requirement: eventually all the nodes should agree about the state of a given node. There are two cases that can originate from split brain conditions. Either some minority of nodes believe the node is in `FAIL` state, or a minority of nodes believe the node is not in `FAIL` state. 
In both the cases eventually the cluster will have a single view of the state of a given node: -**Case 1**: If an actual majority of masters flagged a node as `FAIL`, because of the failure detector and the *chain effect* it generates, every other node will flag the master as `FAIL` eventually, since in the specified window of time enough failures will be reported. +**Case 1**: If a majority of masters have flagged a node as `FAIL`, because of failure detection and the *chain effect* it generates, every other node will eventually flag the master as `FAIL`, since in the specified window of time enough failures will be reported. -**Case 2**: When only a minority of masters flagged a node as `FAIL`, the slave promotion will not happen (as it uses a more formal algorithm that makes sure everybody will know about the promotion eventually) and every node will clear the `FAIL` state for the `FAIL` state clearing rules above (no promotion after some time > of N times the `NODE_TIMEOUT`). +**Case 2**: When only a minority of masters have flagged a node as `FAIL`, the slave promotion will not happen (as it uses a more formal algorithm that makes sure everybody knows about the promotion eventually) and every node will clear the `FAIL` state as per the `FAIL` state clearing rules above (i.e. no promotion after N times the `NODE_TIMEOUT` has elapsed). -**Basically the `FAIL` flag is only used as a trigger to run the safe part of the algorithm** for the slave promotion. In theory a slave may act independently and start a slave promotion when its master is not reachable, and wait for the masters to refuse to provide the acknowledgment, if the master is actually reachable by the majority. However the added complexity of the `PFAIL -> FAIL` state, the weak agreement, and the `FAIL` message to force the propagation of the state in the shortest amount of time in the reachable part of the cluster, have practical advantages. 
Because of this mechanisms usually all the nodes will stop accepting writes about at the same time if the cluster is in an error condition, that is a desirable feature from the point of view of applications using Redis Cluster. Also not needed election attempts, initiated by slaves that can't reach its master for local problems (that is otherwise reachable by the majority of the other master nodes), are avoided. +**The `FAIL` flag is only used as a trigger to run the safe part of the algorithm** for the slave promotion. In theory a slave may act independently and start a slave promotion when its master is not reachable, and wait for the masters to refuse to provide the acknowledgment if the master is actually reachable by the majority. However the added complexity of the `PFAIL -> FAIL` state, the weak agreement, and the `FAIL` message forcing the propagation of the state in the shortest amount of time in the reachable part of the cluster, have practical advantages. Because of these mechanisms, usually all the nodes will stop accepting writes at about the same time if the cluster is in an error state. This is a desirable feature from the point of view of applications using Redis Cluster. Also erroneous election attempts initiated by slaves that can't reach their master due to local problems (the master is otherwise reachable by the majority of other master nodes) are avoided. Configuration handling, propagation, and failovers === @@ -771,7 +772,7 @@ Configuration handling, propagation, and failovers Cluster current epoch --- -Redis Cluster uses a concept similar to the Raft algorithm "term". In Redis Cluster the term is called epoch instead, and it is used in order to give an incremental version to events, so that when multiple nodes provide conflicting information, it is possible for another node to understand which state is the most up to date. +Redis Cluster uses a concept similar to the Raft algorithm "term". 
In Redis Cluster the term is called epoch instead, and it is used in order to give incremental versioning to events. When multiple nodes provide conflicting information, it becomes possible for another node to understand which state is the most up to date. The `currentEpoch` is a 64 bit unsigned number. @@ -779,11 +780,11 @@ At node creation every Redis Cluster node, both slaves and master nodes, set the Every time a packet is received from another node, if the epoch of the sender (part of the cluster bus messages header) is greater than the local node epoch, the `currentEpoch` is updated to the sender epoch. -Because of this semantics eventually all the nodes will agree to the greatest `configEpoch` in the cluster. +Because of these semantics, eventually all the nodes will agree to the greatest `configEpoch` in the cluster. This information is used when the state of the cluster is changed and a node seeks agreement in order to perform some action. -Currently this happens only during slave promotion, as described in the next section. Basically the epoch is a logical clock for the cluster and dictates whatever a given information wins over one with a smaller epoch. +Currently this happens only during slave promotion, as described in the next section. Basically the epoch is a logical clock for the cluster and dictates that given information wins over one with a smaller epoch. Configuration epoch --- @@ -793,18 +794,18 @@ Every master always advertises its `configEpoch` in ping and pong packets along The `configEpoch` is set to zero in masters when a new node is created. A new `configEpoch` is created during slave election. Slaves trying to replace -failing masters increment their epoch and try to get the authorization from +failing masters increment their epoch and try to get authorization from a majority of masters. When a slave is authorized, a new unique `configEpoch` -is created, the slave turns into a master using the new `configEpoch`. 
+is created and the slave turns into a master using the new `configEpoch`. -As explained in the next sections the `configEpoch` helps to resolve conflicts due to different nodes claiming diverging configurations (a condition that may happen because of network partitions and node failures). +As explained in the next sections the `configEpoch` helps to resolve conflicts when different nodes claim divergent configurations (a condition that may happen because of network partitions and node failures). -Slave nodes also advertise the `configEpoch` field in ping and pong packets, but in case of slaves the field represents the `configEpoch` of its master the last time they exchanged packets. This allows other instances to detect when a slave has an old configuration that needs to be updated (Master nodes will not grant votes to slaves with an old configuration). +Slave nodes also advertise the `configEpoch` field in ping and pong packets, but in the case of slaves the field represents the `configEpoch` of its master as of the last time they exchanged packets. This allows other instances to detect when a slave has an old configuration that needs to be updated (master nodes will not grant votes to slaves with an old configuration). -Every time the `configEpoch` changes for some known node, it is permanently stored in the nodes.conf file by all the nodes that received this information. The same also happens for the `currentEpoch` value. This two variables are guaranteed to be saved and `fsync-ed` to disk when updated before a node continues its operations. +Every time the `configEpoch` changes for some known node, it is permanently stored in the nodes.conf file by all the nodes that receive this information. The same also happens for the `currentEpoch` value. These two variables are guaranteed to be saved and `fsync-ed` to disk when updated before a node continues its operations. 
-New, incremental, and guaranteed to be unique `configEpoch` values are generated -using a simple algorithm during failovers. +The `configEpoch` values generated using a simple algorithm during failovers +are guaranteed to be new, incremental, and unique. Slave election and promotion --- @@ -812,21 +813,21 @@ Slave election and promotion Slave election and promotion is handled by slave nodes, with the help of master nodes that vote for the slave to promote. A slave election happens when a master is in `FAIL` state from the point of view of at least one of its slaves that has the prerequisites in order to become a master. -In order for a slave to promote itself to master, it requires to start an election and win it. All the slaves for a given master can start an election if the master is in `FAIL` state, however only one slave will win the election and promote itself to master. +In order for a slave to promote itself to master, it needs to start an election and win it. All the slaves for a given master can start an election if the master is in `FAIL` state, however only one slave will win the election and promote itself to master. A slave starts an election when the following conditions are met: * The slave's master is in `FAIL` state. * The master was serving a non-zero number of slots. -* The slave replication link was disconnected from the master for no longer than a given amount of time, in order to ensure to promote a slave with a reasonable data freshness. This time is user configurable. +* The slave replication link was disconnected from the master for no longer than a given amount of time, in order to ensure the promoted slave's data is reasonably fresh. This time is user configurable. In order to be elected, the first step for a slave is to increment its `currentEpoch` counter, and request votes from master instances. -Votes are requested by the slave by broadcasting a `FAILOVER_AUTH_REQUEST` packet to every master node of the cluster. 
Then it waits for replies to arrive for a maximum time of two times the `NODE_TIMEOUT`, but always for at least 2 seconds. +Votes are requested by the slave by broadcasting a `FAILOVER_AUTH_REQUEST` packet to every master node of the cluster. Then it waits for a maximum time of two times the `NODE_TIMEOUT` for replies to arrive (but always for at least 2 seconds). -Once a master voted for a given slave, replying positively with a `FAILOVER_AUTH_ACK`, it can no longer vote for another slave of the same master for a period of `NODE_TIMEOUT * 2`. In this period it will not be able to reply to other authorization requests for the same master. This is not needed to guarantee safety, but useful to avoid multiple slaves to get elected (even if with a different `configEpoch`) about at the same time, which is usually not wanted. +Once a master has voted for a given slave, replying positively with a `FAILOVER_AUTH_ACK`, it can no longer vote for another slave of the same master for a period of `NODE_TIMEOUT * 2`. In this period it will not be able to reply to other authorization requests for the same master. This is not needed to guarantee safety, but useful for preventing multiple slaves from getting elected (even if with a different `configEpoch`) at around the same time, which is usually not wanted. -A slave discards all the `AUTH_ACK` replies that are received having an epoch that is less than the `currentEpoch` at the time the vote request was sent, in order to never count as valid votes that are about a previous election. +A slave discards any `AUTH_ACK` replies with an epoch that is less than the `currentEpoch` at the time the vote request was sent. This ensures it doesn't count votes intended for a previous election. Once the slave receives ACKs from the majority of masters, it wins the election. 
Otherwise if the majority is not reached within the period of two times `NODE_TIMEOUT` (but always at least 2 seconds), the election is aborted and a new one will be tried again after `NODE_TIMEOUT * 4` (and always at least 4 seconds). @@ -834,33 +835,33 @@ Otherwise if the majority is not reached within the period of two times `NODE_TI Slave rank --- -A slave does not try to get elected as soon as the master is in `FAIL` state, but there is a little delay, that is computed as: +As soon as a master is in `FAIL` state, a slave waits a short period of time before trying to get elected. That delay is computed as follows: DELAY = 500 milliseconds + random delay between 0 and 500 milliseconds + SLAVE_RANK * 1000 milliseconds. -The fixed delay ensures that we wait for the `FAIL` state to propagate across the cluster, otherwise the slave may try to get elected when the masters are still not aware of the `FAIL` state, refusing to grant their vote. +The fixed delay ensures that we wait for the `FAIL` state to propagate across the cluster, otherwise the slave may try to get elected while the masters are still unaware of the `FAIL` state, refusing to grant their vote. -The random delay is used to desynchronize slaves so they'll likely start an election in different moments. +The random delay is used to desynchronize slaves so they're unlikely to start an election at the same time. -The `SLAVE_RANK` is the rank of this slave regarding the amount of replication -stream it processed from the master. Slaves exchange messages when the master -is failing in order to establish a (best effort) rank: the slave with the most -updated replication offset is at rank 0, the second most updated at rank 1, and so forth. In this way the most updated slaves try to get elected before others. +The `SLAVE_RANK` is the rank of this slave regarding the amount of data +replication it has processed from the master. 
Slaves exchange messages when +the master is failing in order to establish a (best effort) rank: the slave +with the most updated replication offset is at rank 0, the second most updated at rank 1, and so forth. In this way the most updated slaves try to get elected before others. -However if a slave of higher rank fails to be elected, the others will try -shortly, so the order is not enforced in a strict way. +Rank order is not strictly enforced; if a slave of higher rank fails to be +elected, the others will try shortly. -Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. +Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than that of any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster (however nodes not currently reachable will eventually receive a ping or pong packet and will be reconfigured, or will receive an `UPDATE` packet from another node, if the information it publishes via heartbeat packets are detected to be out of date). +In order to speed up the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster. Currently unreachable nodes will eventually be reconfigured when they receive a ping or pong packet from another node or will receive an `UPDATE` packet from another node if the information it publishes via heartbeat packets is detected to be out of date. 
-The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade the configuration. Slaves of the old master, or the failed over master that rejoins the cluster, will not just upgrade the configuration but will also configure to replicate from the new master. How nodes rejoining the cluster are configured is explained in one of the next sections. +The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade their configuration. Slaves of the old master (or the failed over master if it rejoins the cluster) will not just upgrade the configuration but will also reconfigure to replicate from the new master. How nodes rejoining the cluster are configured is explained in the next sections. Masters reply to slave vote request --- -In the previous section it was discussed how slaves try to get elected, this section explains what happens from the point of view of a master that is requested to vote for a given slave. +In the previous section it was discussed how slaves try to get elected. This section explains what happens from the point of view of a master that is requested to vote for a given slave. Masters receive requests for votes in form of `FAILOVER_AUTH_REQUEST` requests from slaves. @@ -868,7 +869,7 @@ For a vote to be granted the following conditions need to be met: 1. A master only votes a single time for a given epoch, and refuses to vote for older epochs: every master has a lastVoteEpoch field and will refuse to vote again as long as the `currentEpoch` in the auth request packet is not greater than the lastVoteEpoch. When a master replies positively to a vote request, the lastVoteEpoch is updated accordingly, and safely stored on disk. 2. A master votes for a slave only if the slave's master is flagged as `FAIL`. -3. 
Auth requests with a `currentEpoch` that is less than the master `currentEpoch` are ignored. Because of this the Master reply will always have the same `currentEpoch` as the auth request. If the same slave asks again to be voted, incrementing the `currentEpoch`, it is guaranteed that an old delayed reply from the master can not be accepted for the new vote. +3. Auth requests with a `currentEpoch` that is less than the master `currentEpoch` are ignored. Because of this the master reply will always have the same `currentEpoch` as the auth request. If the same slave asks again to be voted, incrementing the `currentEpoch`, it is guaranteed that an old delayed reply from the master can not be accepted for the new vote. Example of the issue caused by not using rule number 3: @@ -878,15 +879,15 @@ Master `currentEpoch` is 5, lastVoteEpoch is 1 (this may happen after a few fail * Slave tries to be elected with epoch 4 (3+1), master replies with an ok with `currentEpoch` 5, however the reply is delayed. * Slave will try to be elected again, at a later time, with epoch 5 (4+1), the delayed reply reaches the slave with `currentEpoch` 5, and is accepted as valid. -4. Masters don't vote a slave of the same master before `NODE_TIMEOUT * 2` has elapsed since a slave of that master was already voted. This is not strictly required as it is not possible that two slaves win the election in the same epoch, but in practical terms it ensures that normally when a slave is elected it has plenty of time to inform the other slaves avoiding that another slave will win a new election, doing a new unwanted failover. -5. Masters don't try to select the best slave in any way, simply if the slave's master is in `FAIL` state and the master did not voted in the current term, the positive vote is granted. 
However the best slave is the most likely to start the election and win it before the other slaves, since it usually will be able to start the voting process earlier, because of its *higher rank* as explained in the previous section. +4. Masters don't vote for a slave of the same master before `NODE_TIMEOUT * 2` has elapsed if a slave of that master was already voted for. This is not strictly required as it is not possible for two slaves to win the election in the same epoch. However, in practical terms it ensures that when a slave is elected it has plenty of time to inform the other slaves and avoid the possibility that another slave will win a new election, performing an unnecessary second failover. +5. Masters make no effort to select the best slave in any way. If the slave's master is in `FAIL` state and the master did not vote in the current term, a positive vote is granted. The best slave is the most likely to start an election and win it before the other slaves, since it will usually be able to start the voting process earlier because of its *higher rank* as explained in the previous section. 6. When a master refuses to vote for a given slave there is no negative response, the request is simply ignored. -7. Masters don't grant the vote to slaves sending a `configEpoch` that is less than any `configEpoch` in the master table for the slots claimed by the slave. Remember that the slave sends the `configEpoch` of its master, and the bitmap of the slots served by its master. What this means is basically that the slave requesting the vote must have a configuration, for the slots it wants to failover, that is newer or equal the one of the master granting the vote. +7. Masters don't vote for slaves sending a `configEpoch` that is less than any `configEpoch` in the master table for the slots claimed by the slave. Remember that the slave sends the `configEpoch` of its master, and the bitmap of the slots served by its master. 
This means that the slave requesting the vote must have a configuration for the slots it wants to failover that is newer than or equal to the one of the master granting the vote. Practical example of configuration epoch usefulness during partitions --- -This section illustrates how the concept of epoch is used to make the slave promotion process more resistant to partitions. +This section illustrates how the epoch concept is used to make the slave promotion process more resistant to partitions. * A master is no longer reachable indefinitely. The master has three slaves A, B, C. * Slave A wins the election and is promoted to master. @@ -895,16 +896,16 @@ This section illustrates how the concept of epoch is used to make the slave prom * A partition makes B not available for the majority of the cluster. * The previous partition is fixed, and A is available again. -At this point B is down, and A is available again, having a role of master (actually `UPDATE` messages would reconfigure it promptly, but here we assume all get lost). At the same time, slave C will try to get elected in order to fail over B. This is what happens: +At this point B is down and A is available again with a role of master (actually `UPDATE` messages would reconfigure it promptly, but here we assume all `UPDATE` messages were lost). At the same time, slave C will try to get elected in order to fail over B. This is what happens: 1. B will try to get elected and will succeed, since for the majority of masters its master is actually down. It will obtain a new incremental `configEpoch`. -2. A will not be able to claim to be the master for its hash slots, because the other nodes already have the same hash slots associated with an higher configuration epoch (the one of B) compared to the one published by A. +2. 
A will not be able to claim to be the master for its hash slots, because the other nodes already have the same hash slots associated with a higher configuration epoch (the one of B) compared to the one published by A. 3. So, all the nodes will upgrade their table to assign the hash slots to C, and the cluster will continue its operations. -As you'll see in the next sections, actually a stale node rejoining a cluster -will usually get notified as soon as possible about the configuration change, since as soon -as it pings any other node, the receiver will detect it has stale information -and will send an `UPDATE` message. +As you'll see in the next sections, a stale node rejoining a cluster +will usually get notified as soon as possible about the configuration change +because as soon as it pings any other node, the receiver will detect it +has stale information and will send an `UPDATE` message. Hash slots configuration propagation --- @@ -916,11 +917,11 @@ time to rejoin the cluster in a sensible way. There are two ways hash slot configurations are propagated: -1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a slave) servers. -2. `UPDATE` messages. Since in every heartbeat packet there is information about the sender `configEpoch` and set of hash slots served, if a receiver of an heartbeat packet will find the sender information not updated, it will send a packet with the new information, forcing the stale node to update its info. +1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a slave) serves. +2. `UPDATE` messages. 
Since in every heartbeat packet there is information about the sender `configEpoch` and set of hash slots served, if a receiver of a heartbeat packet finds the sender information is stale, it will send a packet with new information, forcing the stale node to update its info. -The receiver of an heartbeat or `UPDATE` message uses certain simple rules in -order to update its table mapping hash slots to nodes. When a new Redis Cluster node is created, its local hash slot table is simple initialized to `NULL` entries, so that each hash slot is not bound, not linked to any node. Something like the following: +The receiver of a heartbeat or `UPDATE` message uses certain simple rules in +order to update its table mapping hash slots to nodes. When a new Redis Cluster node is created, its local hash slot table is simply initialized to `NULL` entries so that each hash slot is not bound or linked to any node. This looks similar to the following: ``` 0 -> NULL @@ -932,9 +933,9 @@ order to update its table mapping hash slots to nodes. When a new Redis Cluster The first rule followed by a node in order to update its hash slot table is the following: -**Rule 1**: If a hash slot is unassigned (set to `NULL`), and a known node claims it, I'll modify my hash slot table associating the claimed hash slots to it. +**Rule 1**: If a hash slot is unassigned (set to `NULL`), and a known node claims it, I'll modify my hash slot table and associate the claimed hash slots to it. 
-So if we receive an heartbeat from node A, claiming to serve hash slots 1 and 2 with a configuration epoch value of 3, the table will be modified into: +So if we receive a heartbeat from node A claiming to serve hash slots 1 and 2 with a configuration epoch value of 3, the table will be modified to: ``` 0 -> NULL @@ -944,25 +945,25 @@ So if we receive an heartbeat from node A, claiming to serve hash slots 1 and 2 16383 -> NULL ``` -Because of this rule, when a new cluster is created, it is only needed to manually assign (using the `CLUSTER ADDSLOTS` command, via the redis-trib command line tool, or by any other mean) the slots served by each master node to the node itself, and the information will rapidly propagate across the cluster. +When a new cluster is created, a system administrator needs to manually assign (using the `CLUSTER ADDSLOTS` command, via the redis-trib command line tool, or by any other means) the slots served by each master node only to the node itself, and the information will rapidly propagate across the cluster. However this rule is not enough. We know that hash slot mapping can change -because of two events: +during two events: 1. A slave replaces its master during a failover. 2. A slot is resharded from a node to a different one. -For now let's focus on failovers. When a slave failover its master, it obtains +For now let's focus on failovers. When a slave fails over its master, it obtains a configuration epoch which is guaranteed to be greater than the one of its master (and more generally greater than any other configuration epoch -generated before). For example node B, which is a slave of A, may failover +generated previously). For example node B, which is a slave of A, may failover B with configuration epoch of 4. 
It will start to send heartbeat packets -(the first time mass-broadcasting cluster-wide), because of the following -second rule, receivers will update their tables: +(the first time mass-broadcasting cluster-wide) and because of the following +second rule, receivers will update their hash slot tables: **Rule 2**: If a hash slot is already assigned, and a known node is advertising it using a `configEpoch` that is greater than the `configEpoch` of the master currently associated with the slot, I'll rebind the hash slot to the new node. -So after receiving messages from B that claims to serve hash slots 1 and 2 with configuration epoch of 4, the receivers will update their table in the following way: +So after receiving messages from B that claim to serve hash slots 1 and 2 with configuration epoch of 4, the receivers will update their table in the following way: ``` 0 -> NULL 1 -> B [4] 2 -> B [4] ... 16383 -> NULL ``` @@ -972,31 +973,31 @@ So after receiving messages from B that claims to serve hash slots 1 and 2 with -Liveness property: because of the second rule eventually all the nodes in the cluster will agree that the owner of a slot is the one with the greatest `configEpoch` among the nodes advertising it. +Liveness property: because of the second rule, eventually all nodes in the cluster will agree that the owner of a slot is the one with the greatest `configEpoch` among the nodes advertising it. This mechanism in Redis Cluster is called **last failover wins**. -The same happens during reshardings. When a node importing an hash slot -ends the import operation, its configuration epoch is incremented to make -sure the information will be updated in the cluster. +The same happens during reshardings. When a node importing a hash slot +completes the import operation, its configuration epoch is incremented to make +sure the change will be propagated throughout the cluster. 
UPDATE messages, a closer look --- -With the previous section in mind, it is easy now to check how update messages +With the previous section in mind, it is easier to see how update messages work. Node A may rejoin the cluster after some time. It will send heartbeat packets where it claims it serves hash slots 1 and 2 with configuration epoch -of 3. All the receivers with an updated information will instead see that +of 3. All the receivers with updated information will instead see that the same hash slots are associated with node B having an higher configuration -epoch. Because of this they'll send to A an `UPDATE` message with the new +epoch. Because of this they'll send an `UPDATE` message to A with the new configuration for the slots. A will update its configuration because of the **rule 2** above. How nodes rejoin the cluster --- -The same basic mechanism is also used in order for a node to rejoin a cluster -in a proper way. Continuing with the example above, node A will be notified +The same basic mechanism is used when a node rejoins a cluster. +Continuing with the example above, node A will be notified that hash slots 1 and 2 are now served by B. Assuming that these two were the only hash slots served by A, the count of hash slots served by A will drop to 0! So A will **reconfigure to be a slave of the new master**. @@ -1006,11 +1007,11 @@ happen that A rejoins after a lot of time, in the meantime it may happen that hash slots originally served by A are served by multiple nodes, for example hash slot 1 may be served by B, and hash slot 2 by C. -So the actual *Redis Cluster node role switch rule* is: **A master node will change its configuration to replicate (be a slave of) the node that stolen its last hash slot**. +So the actual *Redis Cluster node role switch rule* is: **A master node will change its configuration to replicate (be a slave of) the node that stole its last hash slot**. 
-So during the reconfiguration eventually the number of served hash slots will drop to zero, and the node will reconfigure accordingly. Note that in the base case this just means that the old master will be a slave of the slave that replaced it after a failover. However in the general form the rule covers all the possible cases. +During reconfiguration, eventually the number of served hash slots will drop to zero, and the node will reconfigure accordingly. Note that in the base case this just means that the old master will be a slave of the slave that replaced it after a failover. However in the general form the rule covers all possible cases. -Slaves do exactly the same: they reconfigure to replicate to the node that +Slaves do exactly the same: they reconfigure to replicate the node that stole the last hash slot of its former master. Replica migration @@ -1018,19 +1019,19 @@ Replica migration Redis Cluster implements a concept called *replica migration* in order to improve the availability of the system. The idea is that in a cluster with -a master-slave setup, if the map between slaves and masters is fixed there -is limited availability over time if multiple independent failures of single +a master-slave setup, if the map between slaves and masters is fixed +availability is limited over time if multiple independent failures of single nodes happen. For example in a cluster where every master has a single slave, the cluster -can continue the operations as long the master or the slave fail, but not -if both fail the same time. However there is a class of failures, that are +can continue operations as long as either the master or the slave fail, but not +if both fail the same time. However there is a class of failures that are the independent failures of single nodes caused by hardware or software issues that can accumulate over time. For example: * Master A has a single slave A1. * Master A fails. A1 is promoted as new master. 
-* Three hours later A1 fails in an independent manner (not related to the failure of A). No other slave is available for promotion since also node A is still down. The cluster cannot continue normal operations. +* Three hours later A1 fails in an independent manner (unrelated to the failure of A). No other slave is available for promotion since node A is still down. The cluster cannot continue normal operations. If the map between masters and slaves is fixed, the only way to make the cluster more resistant to the above scenario is to add slaves to every master, however @@ -1056,57 +1057,57 @@ following: Replica migration algorithm --- -The migration algorithm does not use any form of agreement, since the slaves -layout in a Redis Cluster is not part of the cluster configuration that requires +The migration algorithm does not use any form of agreement since the slave +layout in a Redis Cluster is not part of the cluster configuration that needs to be consistent and/or versioned with config epochs. Instead it uses an algorithm to avoid mass-migration of slaves when a master is not backed. -The algorithm guarantees that eventually, once the cluster configuration is -stable, every master will be backed by at least one slave. +The algorithm guarantees that eventually (once the cluster configuration is +stable) every master will be backed by at least one slave. This is how the algorithm works. To start we need to define what is a -*good slave* in this context: a good slave is a slave not in FAIL state +*good slave* in this context: a good slave is a slave not in `FAIL` state from the point of view of a given node. The execution of the algorithm is triggered in every slave that detects that there is at least a single master without good slaves. However among all the slaves detecting this condition, only a subset should act. 
This subset is actually often a single slave unless different slaves have in a given moment -a slightly different vision of the failure state of other nodes. +a slightly different view of the failure state of other nodes. -The *acting slave* is the slave among the masters having the maximum number +The *acting slave* is the slave among the masters with the maximum number of attached slaves, that is not in FAIL state and has the smallest node ID. So for example if there are 10 masters with 1 slave each, and 2 masters with -5 slaves each, the slave that will try to migrate is, among the 2 masters -having 5 slaves, the one with the lowest node ID. Given that no agreement +5 slaves each, the slave that will try to migrate is - among the 2 masters +having 5 slaves - the one with the lowest node ID. Given that no agreement is used, it is possible that when the cluster configuration is not stable, -a race condition occurs where multiple slaves think to be the non-failing -slave with the lower node ID (but it is a hard to trigger condition in -practice). If this happens, the result is multiple slaves migrating to the -same master, which is harmless. If the race happens in a way that will left +a race condition occurs where multiple slaves believe themselves to be +the non-failing slave with the lower node ID (it is unlikely for this to happen +in practice). If this happens, the result is multiple slaves migrating to the +same master, which is harmless. If the race happens in a way that will leave the ceding master without slaves, as soon as the cluster is stable again -the algorithm will be re-executed again and will migrate the slave back to +the algorithm will be re-executed again and will migrate a slave back to the original master. -Eventually every master will be backed by at least one slave, however -normally the behavior is that a single slave migrates from a master with +Eventually every master will be backed by at least one slave. 
However, +the normal behavior is that a single slave migrates from a master with multiple slaves to an orphaned master. The algorithm is controlled by a user-configurable parameter called -`cluster-migration-barrier`, that is the number of good slaves a master -will be left with for a slave to migrate. So for example if this parameter -is set to 2, a slave will try to migrate only if its master remains with -two working slaves. +`cluster-migration-barrier`: the number of good slaves a master +must be left with before a slave can migrate away. For example, if this +parameter is set to 2, a slave can try to migrate only if its master remains +with two working slaves. configEpoch conflicts resolution algorithm --- -When new `configEpoch` values are created via slave promotions during +When new `configEpoch` values are created via slave promotion during failovers, they are guaranteed to be unique. However there are two distinct events where new configEpoch values are created in an unsafe way, just incrementing the local `currentEpoch` of -the local node, hoping there are no conflicts at the same time. +the local node and hoping there are no conflicts at the same time. Both the events are system-administrator triggered: 1. `CLUSTER FAILOVER` command with `TAKEOVER` option is able to manually promote a slave node into a master *without the majority of masters being available*. This is useful, for example, in multi data center setups. @@ -1116,35 +1117,35 @@ Specifically, during manual reshardings, when a hash slot is migrated from a node A to a node B, the resharding program will force B to upgrade its configuration to an epoch which is the greatest found in the cluster, plus 1 (unless the node is already the one with the greatest configuration -epoch), without to require for an agreement from other nodes. 
-Usually a real world resharding involves moving several hundred hash slots, -especially in small clusters, so to require an agreement to generate new +epoch), without requiring agreement from other nodes. +Usually a real world resharding involves moving several hundred hash slots +(especially in small clusters). Requiring an agreement to generate new configuration epochs during reshardings, for each hash slot moved, is -inefficient. Moreover it requires an fsync every time in all the cluster nodes -in order to store the new configuration. Because of the way it is performed -instead, we need a new config epoch only when the first hash slot is moved -usually, making it much more efficient in production environments. +inefficient. Moreover it requires an fsync in each of the cluster nodes +every time in order to store the new configuration. Because of the way it is +performed instead, we only need a new config epoch when the first hash slot is moved, +making it much more efficient in production environments. -However because of the two cases above, it is possible, while unlikely, to end -with multiple nodes having the same configuration epoch (think for example -a resharding operation performed by the system administrator, and a failover -happening at the same time, plus a lot of bad luck so that the `currentEpoch` -is not propagated fast enough to avoid a collision). +However because of the two cases above, it is possible (though unlikely) to end +with multiple nodes having the same configuration epoch. A resharding operation +performed by the system administrator, and a failover happening at the same +time (plus a lot of bad luck) could cause `currentEpoch` collisions if +they are not propagated fast enough. -Moreover software bugs and filesystem corruptions are other causes that may -lead to multiple nodes to have the same configuration epoch. 
+Moreover, software bugs and filesystem corruptions can also contribute +to multiple nodes having the same configuration epoch. When masters serving different hash slots have the same `configEpoch`, there -are no issues, and we are more interested in making sure slaves -failing over a master have a different and unique configuration epoch. +are no issues. It is more important that slaves failing over a master have +unique configuration epochs. -However manual interventions or more reshardings may change the cluster +That said, manual interventions or reshardings may change the cluster configuration in different ways. The Redis Cluster main liveness property -is that the slot configurations always converges, so we really want under every -condition that all the master nodes have a different `configEpoch`. +requires that slot configurations always converge, so under every circumstance +we really want all the master nodes to have a different `configEpoch`. -In order to enforce this, **a conflicts resolution algorithm** is used in the -event that two nodes end with the same `configEpoch`. +In order to enforce this, **a conflict resolution algorithm** is used in the +event that two nodes end up with the same `configEpoch`. * IF a master node detects another master node is advertising itself with the same `configEpoch`. @@ -1154,12 +1155,12 @@ the same `configEpoch`. If there are any set of nodes with the same `configEpoch`, all the nodes but the one with the greatest Node ID will move forward, guaranteeing that, eventually, every node will pick a unique configEpoch regardless of what happened. This mechanism also guarantees that after a fresh cluster is created, all -nodes start with a different `configEpoch`, even if this is not actually -used since `redis-trib` makes sure to use `CONFIG SET-CONFIG-EPOCH` at startup. 
+nodes start with a different `configEpoch` (even if this is not actually +used) since `redis-trib` makes sure to use `CONFIG SET-CONFIG-EPOCH` at startup. However if for some reason a node is left misconfigured, it will update its configuration to a different configuration epoch automatically. -Nodes reset +Node resets --- Nodes can be software reset (without restarting them) in order to be reused @@ -1174,10 +1175,10 @@ command is provided in two variants: * `CLUSTER RESET SOFT` * `CLUSTER RESET HARD` -The command must be sent directly to the node to reset, and the default reset -type if no explicit type is provided is to perform a soft reset. +The command must be sent directly to the node to reset. If no reset type is +provided, a soft reset is performed. -The following is a list of operations performed by reset: +The following is a list of operations performed by a reset: 1. Soft and hard reset: If the node is a slave, it is turned into a master, and its dataset is discarded. If the node is a master and contains keys the reset operation is aborted. 2. Soft and hard reset: All the slots are released, and the manual failover state is reset. @@ -1185,26 +1186,26 @@ The following is a list of operations performed by reset: 4. Hard reset only: `currentEpoch`, `configEpoch`, and `lastVoteEpoch` are set to 0. 5. Hard reset only: the Node ID is changed to a new random ID. -Master nodes with non-empty data sets can't be reset (since normally you want to reshard data to the other nodes), however in special conditions when this is appropriate, like when a cluster is totally destroyed in order to create a new one, `FLUSHALL` must be executed before to proceed with the reset. +Master nodes with non-empty data sets can't be reset (since normally you want to reshard data to the other nodes). However, under special conditions when this is appropriate (e.g. 
when a cluster is totally destroyed with the intent of creating a new one), `FLUSHALL` must be executed before proceeding with the reset. Removing nodes from a cluster --- It is possible to practically remove a node from an existing cluster by -resharding all its data to other nodes (if it is a master node) and finally -by shutting it down, however the other nodes will still remember its node +resharding all its data to other nodes (if it is a master node) and +shutting it down. However, the other nodes will still remember its node ID and address, and will attempt to connect with it. -For this reason when a node is removed, we want to also remove its entry +For this reason, when a node is removed we want to also remove its entry from all the other nodes tables. This is accomplished by using the `CLUSTER FORGET ` command. The command does two things: 1. It removes the node with the specified node ID from the nodes table. -2. It sets a 60 seconds ban to prevent a node with the same node ID to be re-added. +2. It sets a 60 second ban which prevents a node with the same node ID from being re-added. -The second operation is needed because Redis Cluster uses gossip in order to auto-discover nodes, so removing the node X from node A, could result into node B to gossip node X to A again. Because of the 60 seconds ban, the Redis Cluster administration tools have 60 seconds in order to remove the node from all the nodes, preventing the re-addition of the node because of auto discovery. +The second operation is needed because Redis Cluster uses gossip in order to auto-discover nodes, so removing the node X from node A, could result in node B gossiping about node X to A again. Because of the 60 second ban, the Redis Cluster administration tools have 60 seconds in order to remove the node from all the nodes, preventing the re-addition of the node due to auto discovery. Further information is available in the `CLUSTER FORGET` documentation. 
@@ -1212,12 +1213,12 @@ Publish/Subscribe === In a Redis Cluster clients can subscribe to every node, and can also -publish to every other node. The cluster will make sure that publish +publish to every other node. The cluster will make sure that published messages are forwarded as needed. -The current implementation will simply broadcast all the publish messages -to all the other nodes, but at some point this will be optimized either -using bloom filters or other algorithms. +The current implementation will simply broadcast each published message +to all other nodes, but at some point this will be optimized either +using Bloom filters or other algorithms. Appendix === From 94ddb91ac161b7379e0b6930bc253246f3ce27d5 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sun, 12 Jul 2015 23:51:07 +0200 Subject: [PATCH 0352/2314] A few more fixes --- topics/cluster-spec.md | 69 ++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 3bce2611a6..172f078d9a 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -16,7 +16,7 @@ Redis Cluster is a distributed implementation of Redis with the following goals, * High performance and linear scalability up to 1000 nodes. There are no proxies, asynchronous replication is used, and no merge operations are performed on values. * Acceptable degree of write safety: the system tries (in a best-effort way) to retain all the writes originating from clients connected with the majority of the master nodes. Usually there are small windows where acknowledged writes can be lost. Windows to lose acknowledged writes are larger when clients are in a minority partition. -* Availability: Redis Cluster is able to survive to partitions where the majority of the master nodes are reachable and there is at least a reachable slave for every master node that is no longer reachable. 
Moreover using *replicas migration*, masters no longer replicated by any slave will receive one from a master which is covered by multiple slaves. +* Availability: Redis Cluster is able to survive partitions where the majority of the master nodes are reachable and there is at least one reachable slave for every master node that is no longer reachable. Moreover using *replicas migration*, masters no longer replicated by any slave will receive one from a master which is covered by multiple slaves. What is described in this document is implemented in Redis 3.0 or greater. @@ -69,8 +69,8 @@ Write safety Redis Cluster uses asynchronous replication between nodes, and **last failover wins** implicit merge function. This means that the last elected master dataset eventually replaces all the other replicas. There is always a window of time when it is possible to lose writes during partitions. However these windows are very different in the case of a client that is connected to the majority of masters, and a client that is connected to the minority of masters. -Redis Cluster tries harder to retain writes that are performed by clients connected to the majority of masters, compared to writes performed into the minority -side. The following are examples of scenarios that lead to loss of acknowledged +Redis Cluster tries harder to retain writes that are performed by clients connected to the majority of masters, compared to writes performed in the minority side. +The following are examples of scenarios that lead to loss of acknowledged writes received in the majority partitions during failures: 1. A write may reach a master, but while the master may be able to reply to the client, the write may not be propagated to slaves via the asynchronous replication used between master and slave nodes. If the master dies without the write reaching the slaves, the write is lost forever if the master is unreachable for a long enough period that one of its slaves is promoted. 
This is usually hard to observe in the case of a total, sudden failure of a master node since masters try to reply to clients (with the acknowledge of the write) and slaves (propagating the write) at about the same time. However it is a real world failure mode. @@ -114,7 +114,7 @@ Eventually clients obtain an up-to-date representation of the cluster and which Because of the use of asynchronous replication, nodes do not wait for other nodes' acknowledgment of writes (if not explicitly requested using the `WAIT` command). -Also, because multiple-key commands are only limited to *near* keys, data is never moved between nodes except when resharding. +Also, because multi-key commands are only limited to *near* keys, data is never moved between nodes except when resharding. Normal operations are handled exactly as in the case of a single Redis instance. This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design scales linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single standalone Redis node case. @@ -144,13 +144,11 @@ for the cluster size of 16384 master nodes (however the suggested max size of nodes is in the order of ~ 1000 nodes). Each master node in a cluster handles a subset of the 16384 hash slots. -The cluster is **stable** when there is no cluster reconfiguration in -progress (i.e. where hash slots are being moved from one node -to another). When the cluster is stable, a single hash slot will be -served by a single node (however the serving node can have one or more -slaves that will replace it in the case of net splits or failures, -and that can be used in order to scale read operations where -reading stale data is acceptable). +The cluster is **stable** when there is no cluster reconfiguration in +progress (i.e. 
where hash slots are being moved from one node to another). +When the cluster is stable, a single hash slot will be served by a single node +(however the serving node can have one or more slaves that will replace it in the case of net splits or failures, +and that can be used in order to scale read operations where reading stale data is acceptable). The base algorithm used to map keys to hash slots is the following (read the next paragraph for the hash tag exception to this rule): @@ -268,11 +266,11 @@ cluster configuration detail of this specific node, and is eventually consistent across the cluster. Some other information, like the last time a node was pinged, is instead local to each node. -Every node maintains the following information about other nodes that it is -aware of in the cluster: The node ID, IP and port of the node, a set of -flags, what is the master of the node if it is flagged as `slave`, last time -the node was pinged and the last time the pong was received, the current -*configuration epoch* of the node (explained later in this specification), +Every node maintains the following information about other nodes that it is +aware of in the cluster: The node ID, IP and port of the node, a set of +flags, what is the master of the node if it is flagged as `slave`, last time +the node was pinged and the last time the pong was received, the current +*configuration epoch* of the node (explained later in this specification), the link state and finally the set of hash slots served. A detailed [explanation of all the node fields](http://redis.io/commands/cluster-nodes) is described in the `CLUSTER NODES` documentation. @@ -379,7 +377,7 @@ between hash slots and Redis nodes identified by IP:port pairs. The client is not required to, but should try to memorize that hash slot 3999 is served by 127.0.0.1:6381. 
This way once a new command needs to -be issued it can compute the hash slot of the target key and have a +be issued it can compute the hash slot of the target key and have a greater chance of choosing the right node. An alternative is to just refresh the whole client-side cluster layout @@ -493,7 +491,7 @@ dataset. From the point of view of an external client a key exists either in A or B at any given time. In Redis Cluster there is no need to specify a database other than 0, but -`MIGRATE` is a general command that can be used for other tasks not +`MIGRATE` is a general command that can be used for other tasks not involving Redis Cluster. `MIGRATE` is optimized to be as fast as possible even when moving complex keys such as long lists, but in Redis Cluster reconfiguring the @@ -527,8 +525,7 @@ ASKING command before sending the query. Basically the ASKING command sets a one-time flag on the client that forces a node to serve a query about an IMPORTING slot. -The full semantics of ASK redirection from the -point of view of the client is as follows: +The full semantics of ASK redirection from the point of view of the client is as follows: * If ASK redirection is received, send only the query that was redirected to the specified node but continue sending subsequent queries to the old node. * Start the redirected query with the ASKING command. @@ -554,7 +551,7 @@ be redirected, such a client would be very inefficient. Redis Cluster clients should try to be smart enough to memorize the slots configuration. However this configuration is not *required* to be up to date. -Since contacting the wrong node will simply result in a redirection, that +Since contacting the wrong node will simply result in a redirection, that should trigger an update of the client view. Clients usually need to fetch a complete list of slots and mapped node @@ -564,9 +561,9 @@ addresses in two different situations: * When a `MOVED` redirection is received. 
Note that a client may handle the `MOVED` redirection by updating just the -moved slot in its table, however this is usually not efficient since often -the configuration of multiple slots is modified at once (for example if a -slave is promoted to master, all the slots served by the old master will +moved slot in its table, however this is usually not efficient since often +the configuration of multiple slots is modified at once (for example if a +slave is promoted to master, all the slots served by the old master will be remapped). It is much simpler to react to a `MOVED` redirection by fetching the full map of slots to nodes from scratch. @@ -624,7 +621,7 @@ again to check if the cluster is now configured properly. Multiple keys operations --- -Using hash tags, clients are free to use multiple-key operations. +Using hash tags, clients are free to use multi-key operations. For example the following operation is valid: MSET {user:1000}.name Angela {user:1000}.surname White @@ -684,10 +681,10 @@ The number of messages globally exchanged can be sizable if `NODE_TIMEOUT` is se For example in a 100 node cluster with a node timeout set to 60 seconds, every node will try to send 99 pings every 30 seconds, with a total amount of pings of 3.3 per second. Multiplied by 100 nodes, this is 330 pings per second in the total cluster. There are ways to lower the number of messages, however there have been no -reported issues with the bandwidth currently used by Redis Cluster failure +reported issues with the bandwidth currently used by Redis Cluster failure detection, so for now the obvious and direct design is used. Note that even in the above example, the 330 packets per second exchanged are evenly -divided among 100 different nodes, so the traffic each node receives +divided among 100 different nodes, so the traffic each node receives is acceptable. 
Heartbeat packet content @@ -844,12 +841,12 @@ The fixed delay ensures that we wait for the `FAIL` state to propagate across th The random delay is used to desynchronize slaves so they're unlikely to start an election at the same time. -The `SLAVE_RANK` is the rank of this slave regarding the amount of data -replication it has processed from the master. Slaves exchange messages when -the master is failing in order to establish a (best effort) rank: the slave -with the most updated replication offset is at rank 0, the second most updated at rank 1, and so forth. In this way the most updated slaves try to get elected before others. +The `SLAVE_RANK` is the rank of this slave regarding the amount of replication data it has processed from the master. +Slaves exchange messages when the master is failing in order to establish a (best effort) rank: +the slave with the most updated replication offset is at rank 0, the second most updated at rank 1, and so forth. +In this way the most updated slaves try to get elected before others. -Rank order is not strictly enforced; if a slave of higher rank fails to be +Rank order is not strictly enforced; if a slave of higher rank fails to be elected, the others will try shortly. Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than that of any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. @@ -1081,7 +1078,7 @@ So for example if there are 10 masters with 1 slave each, and 2 masters with 5 slaves each, the slave that will try to migrate is - among the 2 masters having 5 slaves - the one with the lowest node ID. 
Given that no agreement is used, it is possible that when the cluster configuration is not stable, -a race condition occurs where multiple slaves believe themselves to be +a race condition occurs where multiple slaves believe themselves to be the non-failing slave with the lower node ID (it is unlikely for this to happen in practice). If this happens, the result is multiple slaves migrating to the same master, which is harmless. If the race happens in a way that will leave @@ -1095,7 +1092,7 @@ multiple slaves to an orphaned master. The algorithm is controlled by a user-configurable parameter called `cluster-migration-barrier`: the number of good slaves a master -must be left with before a slave can migrate away. For example, if this +must be left with before a slave can migrate away. For example, if this parameter is set to 2, a slave can try to migrate only if its master remains with two working slaves. @@ -1123,12 +1120,12 @@ Usually a real world resharding involves moving several hundred hash slots configuration epochs during reshardings, for each hash slot moved, is inefficient. Moreover it requires an fsync in each of the cluster nodes every time in order to store the new configuration. Because of the way it is -performed instead, we only need a new config epoch when the first hash slot is moved, +performed instead, we only need a new config epoch when the first hash slot is moved, making it much more efficient in production environments. However because of the two cases above, it is possible (though unlikely) to end with multiple nodes having the same configuration epoch. A resharding operation -performed by the system administrator, and a failover happening at the same +performed by the system administrator, and a failover happening at the same time (plus a lot of bad luck) could cause `currentEpoch` collisions if they are not propagated fast enough. 
From def67b170d2f43875e0da31b347358b5eb429078 Mon Sep 17 00:00:00 2001 From: Jungtaek Lim Date: Fri, 12 Jun 2015 07:58:25 +0900 Subject: [PATCH 0353/2314] Correct version that pipe mode was introduced --- topics/mass-insert.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/mass-insert.md b/topics/mass-insert.md index 6a7fa9c1c5..38d6abcedf 100644 --- a/topics/mass-insert.md +++ b/topics/mass-insert.md @@ -41,7 +41,7 @@ as fast as possible. In the past the way to do this was to use the However this is not a very reliable way to perform mass import because netcat does not really know when all the data was transferred and can't check for -errors. In the unstable branch of Redis at github the `redis-cli` utility +errors. In 2.6 or later versions of Redis the `redis-cli` utility supports a new mode called **pipe mode** that was designed in order to perform mass insertion. From 9027192d8ea91f300a8cef63e723c68625fdf4c4 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:04:03 +0200 Subject: [PATCH 0354/2314] =?UTF-8?q?propogated=20=E2=86=92=20propagated?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 172f078d9a..71ab99cf66 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -976,7 +976,7 @@ This mechanism in Redis Cluster is called **last failover wins**. The same happens during reshardings. When a node importing a hash slot completes the import operation, its configuration epoch is incremented to make -sure the change will be propogated throughout the cluster. +sure the change will be propagated throughout the cluster. 
UPDATE messages, a closer look --- From e54eb7ddb931f884823a128be1391b368ef7a1f2 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:07:01 +0200 Subject: [PATCH 0355/2314] Update client URLs --- clients.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index c825ade1f0..24b7dc93d5 100644 --- a/clients.json +++ b/clients.json @@ -395,7 +395,7 @@ { "name": "RedisServer", "language": "PHP", - "repository": "https://github.com/jamm/Memory/blob/master/lib/Jamm/Memory/RedisServer.php", + "repository": "https://github.com/e-oz/Memory/blob/master/lib/Jamm/Memory/RedisServer.php", "description": "Standalone and full-featured class for Redis in PHP", "authors": ["eugeniyoz"] }, @@ -665,7 +665,7 @@ { "name": "node_redis", "language": "Node.js", - "repository": "https://github.com/mranney/node_redis", + "repository": "https://github.com/NodeRedis/node_redis", "description": "Recommended client for node.", "authors": ["mranney"], "recommended": true, @@ -926,7 +926,7 @@ { "name": "redis", "language": "Nim", - "repository": "https://github.com/Araq/Nim", + "repository": "https://github.com/nim-lang/Nim/blob/devel/lib/pure/redis.nim", "url": "http://nim-lang.org/docs/redis.html", "description": "Redis client for Nim", "authors": [], From 9f1083f79cc2912ab402e611330ae4ff14ef82e6 Mon Sep 17 00:00:00 2001 From: Anirudh Srinivas Date: Tue, 9 Jul 2013 16:04:37 -0700 Subject: [PATCH 0356/2314] Add finagle-redis to scala clients --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 4a0de6e3bb..b2228ebed9 100644 --- a/clients.json +++ b/clients.json @@ -990,5 +990,13 @@ "description": "A lightweight wrapper around the C client hiredis.", "authors": ["matt_howlett"], "active": true + }, + + { + "name": "finagle-redis", + "language": "Scala", + "repository": 
"https://github.com/twitter/finagle", + "description": "Redis client based on Finagle", + "authors": [""] } ] From 54f44330f891fcbae4c0d7575c34171f525b82d5 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:20:08 +0200 Subject: [PATCH 0357/2314] Add full URL to finagle-redis --- clients.json | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index b2228ebed9..55b82096cf 100644 --- a/clients.json +++ b/clients.json @@ -995,8 +995,7 @@ { "name": "finagle-redis", "language": "Scala", - "repository": "https://github.com/twitter/finagle", - "description": "Redis client based on Finagle", - "authors": [""] + "repository": "https://github.com/twitter/finagle/tree/develop/finagle-redis", + "description": "Redis client based on Finagle" } ] From 821de2c0cf0cc0bad243f8ff966493cdc981d491 Mon Sep 17 00:00:00 2001 From: Vitaliy Khamin Date: Tue, 13 Aug 2013 11:27:15 +0600 Subject: [PATCH 0358/2314] redisca client info added. 
--- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 55b82096cf..73d3ab1bed 100644 --- a/clients.json +++ b/clients.json @@ -504,6 +504,15 @@ "authors": ["rebolek"] }, + { + "name": "redisca", + "language": "Python", + "repository": "https://github.com/khamin/redisca", + "description": "Lightweight ORM for Redis", + "authors": ["vitaliykhamin"], + "active": true + }, + { "name": "scala-redis", "language": "Scala", From 2707f7cd6caf0855be7d0f2674c3ce2733019b08 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:35:24 +0200 Subject: [PATCH 0359/2314] Updated to second version of redisca --- clients.json | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/clients.json b/clients.json index 73d3ab1bed..095c5d8927 100644 --- a/clients.json +++ b/clients.json @@ -505,12 +505,11 @@ }, { - "name": "redisca", + "name": "redisca2", "language": "Python", - "repository": "https://github.com/khamin/redisca", + "repository": "https://github.com/khamin/redisca2", "description": "Lightweight ORM for Redis", - "authors": ["vitaliykhamin"], - "active": true + "authors": ["vitaliykhamin"] }, { From 5f16df4a4307a083628187b5b0dd440aa797f409 Mon Sep 17 00:00:00 2001 From: Dirk Eddelbuettel Date: Sun, 25 Aug 2013 09:34:05 -0500 Subject: [PATCH 0360/2314] added entry for R client --- clients.json | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/clients.json b/clients.json index 095c5d8927..ce2a62123a 100644 --- a/clients.json +++ b/clients.json @@ -1006,4 +1006,15 @@ "repository": "https://github.com/twitter/finagle/tree/develop/finagle-redis", "description": "Redis client based on Finagle" } + + { + "name": "rredis", + "language": "R", + "repository": "https://github.com/bwlewis/rredis", + "url": "http://cran.r-project.org/package=rredis", + "description": "Redis client for R", + "authors": ["bwlewis"] + 
"active": true + } + ] From b9bb70858446155edb97361e5a949af97cb2ac0a Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:38:42 +0200 Subject: [PATCH 0361/2314] Correct json --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index ce2a62123a..cf046c1d63 100644 --- a/clients.json +++ b/clients.json @@ -1005,7 +1005,7 @@ "language": "Scala", "repository": "https://github.com/twitter/finagle/tree/develop/finagle-redis", "description": "Redis client based on Finagle" - } + }, { "name": "rredis", @@ -1013,7 +1013,7 @@ "repository": "https://github.com/bwlewis/rredis", "url": "http://cran.r-project.org/package=rredis", "description": "Redis client for R", - "authors": ["bwlewis"] + "authors": ["bwlewis"], "active": true } From 6fc3012d125b1657fa104fbc21f77d2a15010b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Proch=C3=A1zka?= Date: Wed, 4 Sep 2013 13:54:00 +0200 Subject: [PATCH 0362/2314] Added Kdyby/Redis to PHP clients list Kdyby/Redis is a powerfull caching and session storage for Nette Framework, including metadata journals (like tags and priorities) that could be used for invalidating the data. 
--- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index cf046c1d63..2c75928cc0 100644 --- a/clients.json +++ b/clients.json @@ -416,6 +416,14 @@ "description": "Lightweight, standalone, unit-tested fork of Redisent which wraps phpredis for best performance if available.", "authors": ["colinmollenhour"] }, + { + "name": "Kdyby/Redis", + "language": "PHP", + "repository": "https://github.com/kdyby/redis", + "description": "Powerfull Redis storage for Nette Framework",", + "authors": ["hosiplan"] + "active": true + }, { "name": "phpish/redis", From f662d2425c2e73e8b0a6effb43359f5a19853ed7 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:43:35 +0200 Subject: [PATCH 0363/2314] Remove non-existent author and correct json --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 2c75928cc0..e24a38b381 100644 --- a/clients.json +++ b/clients.json @@ -416,12 +416,12 @@ "description": "Lightweight, standalone, unit-tested fork of Redisent which wraps phpredis for best performance if available.", "authors": ["colinmollenhour"] }, + { "name": "Kdyby/Redis", "language": "PHP", "repository": "https://github.com/kdyby/redis", - "description": "Powerfull Redis storage for Nette Framework",", - "authors": ["hosiplan"] + "description": "Powerfull Redis storage for Nette Framework", "active": true }, From 9264a33544ce9821ecbc3f0df7a4f4b61551ccbf Mon Sep 17 00:00:00 2001 From: mikeheier Date: Sun, 28 Dec 2014 00:30:21 -0600 Subject: [PATCH 0364/2314] added redis-as3 client --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index e24a38b381..95069ff453 100644 --- a/clients.json +++ b/clients.json @@ -983,6 +983,7 @@ "description": "Thread-safe async Redis client. 
Offers high performance and simple api", "authors": ["bn_andrew"] }, + { "name": "redis", "language": "Dart", @@ -1023,6 +1024,14 @@ "description": "Redis client for R", "authors": ["bwlewis"], "active": true + }, + + { + "name": "Redis-AS3", + "language": "ActionScript", + "repository": "https://github.com/mikeheier/Redis-AS3", + "description": "An as3 client library for redis.", + "authors": ["mikeheier"] } ] From 0490bd7b76192de4590cee67e5b342a042b0fa9e Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 13 Jul 2015 00:49:07 +0200 Subject: [PATCH 0365/2314] Removed invalid twitter user --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 95069ff453..0124f512f2 100644 --- a/clients.json +++ b/clients.json @@ -1031,7 +1031,7 @@ "language": "ActionScript", "repository": "https://github.com/mikeheier/Redis-AS3", "description": "An as3 client library for redis.", - "authors": ["mikeheier"] + "authors": [] } ] From 4aa0770b4b17d883a305a5c074f820e4f290a7c7 Mon Sep 17 00:00:00 2001 From: mmurdoch Date: Sun, 31 May 2015 10:26:38 +0100 Subject: [PATCH 0366/2314] Improved grammar in 'Distributed locks' topic --- topics/distlock.md | 64 +++++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/topics/distlock.md b/topics/distlock.md index 8d1b11da6a..1a6cfca864 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -2,7 +2,7 @@ Distributed locks with Redis === Distributed locks are a very useful primitive in many environments where -different processes require to operate with shared resources in a mutually +different processes must operate with shared resources in a mutually exclusive way. There are a number of libraries and blog posts describing how to implement @@ -20,8 +20,8 @@ complex or alternative designs. 
Implementations --- -Before describing the algorithm, here there are a few links at implementations -already available, that can be used as a reference. +Before describing the algorithm, here are a few links to implementations +already available that can be used for reference. * [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution and perhaps more. * [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). @@ -29,32 +29,32 @@ already available, that can be used as a reference. * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). * [Redisson](https://github.com/mrniko/redisson) (Java implementation). * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). -* [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (Cpp implementation). +* [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). * [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). * [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. Safety and Liveness guarantees --- -We are going to model our design with just three properties, that are from our point of view the minimum guarantees needed to use distributed locks in an effective way. +We are going to model our design with just three properties that, from our point of view, are the minimum guarantees needed to use distributed locks in an effective way. 1. Safety property: Mutual exclusion. At any given moment, only one client can hold a lock. -2. Liveness property A: Deadlocks free. 
Eventually it is always possible to acquire a lock, even if the client that locked a resource crashed or gets partitioned. +2. Liveness property A: Deadlock free. Eventually it is always possible to acquire a lock, even if the client that locked a resource crashed or gets partitioned. 3. Liveness property B: Fault tolerance. As long as the majority of Redis nodes are up, clients are able to acquire and release locks. -Why failover based implementations are not enough +Why failover-based implementations are not enough --- -To understand what we want to improve, let’s analyze the current state of affairs with most Redis-based distributed locks libraries. +To understand what we want to improve, let’s analyze the current state of affairs with most Redis-based distributed lock libraries. -The simple way to use Redis to lock a resource is to create a key into an instance. The key is usually created with a limited time to live, using Redis expires feature, so that eventually it gets released one way or the other (property 2 in our list). When the client needs to release the resource, it deletes the key. +The simplest way to use Redis to lock a resource is to create a key in an instance. The key is usually created with a limited time to live, using the Redis expires feature, so that eventually it will get released (property 2 in our list). When the client needs to release the resource, it deletes the key. Superficially this works well, but there is a problem: this is a single point of failure in our architecture. What happens if the Redis master goes down? -Well, let’s add a slave! And use it if the master is unavailable. This is unfortunately not viable. By doing so we can’t implement our safety property of the mutual exclusion, because Redis replication is asynchronous. +Well, let’s add a slave! And use it if the master is unavailable. This is unfortunately not viable. 
By doing so we can’t implement our safety property of mutual exclusion, because Redis replication is asynchronous. -This is an obvious race condition with this model: +There is an obvious race condition with this model: -1. Client A acquires the lock into the master. +1. Client A acquires the lock in the master. 2. The master crashes before the write to the key is transmitted to the slave. 3. The slave gets promoted to master. 4. Client B acquires the lock to the same resource A already holds a lock for. **SAFETY VIOLATION!** @@ -72,9 +72,9 @@ To acquire the lock, the way to go is the following: SET resource_name my_random_value NX PX 30000 The command will set the key only if it does not already exist (NX option), with an expire of 30000 milliseconds (PX option). -The key is set to a value “my_random_value”. This value requires to be unique across all the clients and all the locks requests. +The key is set to a value “my_random_value”. This value must be unique across all clients and all lock requests. -Basically the random value is used in order to release the lock in a safe way, with a script that tells Redis: remove the key only if exists and the value stored at the key is exactly the one I expect to be. This is accomplished by the following Lua script: +Basically the random value is used in order to release the lock in a safe way, with a script that tells Redis: remove the key only if it exists and the value stored at the key is exactly the one I expect to be. This is accomplished by the following Lua script: if redis.call("get",KEYS[1]) == ARGV[1] then return redis.call("del",KEYS[1]) @@ -82,29 +82,29 @@ Basically the random value is used in order to release the lock in a safe way, w return 0 end -This is important in order to avoid removing a lock that was created by another client. 
For example a client may acquire the lock, get blocked into some operation for longer than the lock validity time (the time at which the key will expire), and later remove the lock, that was already acquired by some other client. +This is important in order to avoid removing a lock that was created by another client. For example a client may acquire the lock, get blocked in some operation for longer than the lock validity time (the time at which the key will expire), and later remove the lock, that was already acquired by some other client. Using just DEL is not safe as a client may remove the lock of another client. With the above script instead every lock is “signed” with a random string, so the lock will be removed only if it is still the one that was set by the client trying to remove it. -What this random string should be? I assume it’s 20 bytes from /dev/urandom, but you can find cheaper ways to make it unique enough for your tasks. +What should this random string be? I assume it’s 20 bytes from /dev/urandom, but you can find cheaper ways to make it unique enough for your tasks. For example a safe pick is to seed RC4 with /dev/urandom, and generate a pseudo random stream from that. A simpler solution is to use a combination of unix time with microseconds resolution, concatenating it with a client ID, it is not as safe, but probably up to the task in most environments. The time we use as the key time to live, is called the “lock validity time”. It is both the auto release time, and the time the client has in order to perform the operation required before another client may be able to acquire the lock again, without technically violating the mutual exclusion guarantee, which is only limited to a given window of time from the moment the lock is acquired. -So now we have a good way to acquire and release the lock. The system, reasoning about a non-distributed system which is composed of a single instance, always available, is safe. 
Let’s extend the concept to a distributed system where we don’t have such guarantees. +So now we have a good way to acquire and release the lock. The system, reasoning about a non-distributed system composed of a single, always available, instance, is safe. Let’s extend the concept to a distributed system where we don’t have such guarantees. The Redlock algorithm --- -In the distributed version of the algorithm we assume to have N Redis masters. Those nodes are totally independent, so we don’t use replication or any other implicit coordination system. We already described how to acquire and release the lock safely in a single instance. We give for granted that the algorithm will use this method to acquire and release the lock in a single instance. In our examples we set N=5, which is a reasonable value, so we need to run 5 Redis masters in different computers or virtual machines in order to ensure that they’ll fail in a mostly independent way. +In the distributed version of the algorithm we assume we have N Redis masters. Those nodes are totally independent, so we don’t use replication or any other implicit coordination system. We already described how to acquire and release the lock safely in a single instance. We take for granted that the algorithm will use this method to acquire and release the lock in a single instance. In our examples we set N=5, which is a reasonable value, so we need to run 5 Redis masters on different computers or virtual machines in order to ensure that they’ll fail in a mostly independent way. In order to acquire the lock, the client performs the following operations: 1. It gets the current time in milliseconds. -2. It tries to acquire the lock in all the N instances sequentially, using the same key name and random value in all the instances. During the step 2, when setting the lock in each instance, the client uses a timeout which is small compared to the total lock auto-release time in order to acquire it. 
For example if the auto-release time is 10 seconds, the timeout could be in the ~ 5-50 milliseconds range. This prevents the client to remain blocked for a long time trying to talk with a Redis node which is down: if an instance is not available, we should try to talk with the next instance ASAP. -3. The client computes how much time elapsed in order to acquire the lock, by subtracting to the current time the timestamp obtained in step 1. If and only if the client was able to acquire the lock in the majority of the instances (at least 3), and the total time elapsed to acquire the lock is less than lock validity time, the lock is considered to be acquired. +2. It tries to acquire the lock in all the N instances sequentially, using the same key name and random value in all the instances. During step 2, when setting the lock in each instance, the client uses a timeout which is small compared to the total lock auto-release time in order to acquire it. For example if the auto-release time is 10 seconds, the timeout could be in the ~ 5-50 milliseconds range. This prevents the client from remaining blocked for a long time trying to talk with a Redis node which is down: if an instance is not available, we should try to talk with the next instance ASAP. +3. The client computes how much time elapsed in order to acquire the lock, by subtracting from the current time the timestamp obtained in step 1. If and only if the client was able to acquire the lock in the majority of the instances (at least 3), and the total time elapsed to acquire the lock is less than lock validity time, the lock is considered to be acquired. 4. If the lock was acquired, its validity time is considered to be the initial validity time minus the time elapsed, as computed in step 3. -5. 
If the client failed to acquire the lock for some reason (either it was not able to lock N/2+1 instances or the validity time is negative), it will try to unlock all the instances (even the instances it believe it was not able to lock). +5. If the client failed to acquire the lock for some reason (either it was not able to lock N/2+1 instances or the validity time is negative), it will try to unlock all the instances (even the instances it believed it was not able to lock). Is the algorithm asynchronous? --- @@ -118,29 +118,29 @@ For more information about similar systems requiring a bound *clock drift*, this Retry on failure --- -When a client is not able to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock, for the same resource, at the same time (this may result in a split brain condition where nobody wins). Also the faster a client will try to acquire the lock in the majority of Redis instances, the less window for a split brain condition (and the need for a retry), so ideally the client should try to send the SET commands to the N instances at the same time using multiplexing. +When a client is unable to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock for the same resource at the same time (this may result in a split brain condition where nobody wins). Also the faster a client tries to acquire the lock in the majority of Redis instances, the smaller the window for a split brain condition (and the need for a retry), so ideally the client should try to send the SET commands to the N instances at the same time using multiplexing. 
-It is worth to stress how important is for the clients that failed to acquire the majority of locks, to release the (partially) acquired locks ASAP, so that there is no need to wait for keys expiry in order for the lock to be acquired again (however if a network partition happens and the client is no longer able to communicate with the Redis instances, there is to pay an availability penalty and wait for the expires). +It is worth stressing how important it is for clients that fail to acquire the majority of locks, to release the (partially) acquired locks ASAP, so that there is no need to wait for key expiry in order for the lock to be acquired again (however if a network partition happens and the client is no longer able to communicate with the Redis instances, there is an availability penalty to pay as it waits for key expiration). Releasing the lock --- -Releasing the lock is simple and involves just to release the lock in all the instances, regardless of the fact the client believe it was able to successfully lock a given instance. +Releasing the lock is simple and involves just releasing the lock in all instances, whether or not the client believes it was able to successfully lock a given instance. Safety arguments --- Is the algorithm safe? We can try to understand what happens in different scenarios. -To start let’s assume that a client is able to acquire the lock in the majority of instances. All the instances will contain a key with the same time to live. However the key was set at different times, so the keys will also expire at different times. However if the first key was set at worst at time T1 (the time we sample before contacting the first server) and the last key was set at worst at time T2 (the time we obtained the reply from the last server), we are sure that the first key to expire in the set will exist for at least `MIN_VALIDITY=TTL-(T2-T1)-CLOCK_DRIFT`. 
All the other keys will expire later, so we are sure that the keys will be simultaneously set for at least this time. +To start let’s assume that a client is able to acquire the lock in the majority of instances. All the instances will contain a key with the same time to live. However, the key was set at different times, so the keys will also expire at different times. But if the first key was set at worst at time T1 (the time we sample before contacting the first server) and the last key was set at worst at time T2 (the time we obtained the reply from the last server), we are sure that the first key to expire in the set will exist for at least `MIN_VALIDITY=TTL-(T2-T1)-CLOCK_DRIFT`. All the other keys will expire later, so we are sure that the keys will be simultaneously set for at least this time. -During the time the majority of keys are set, another client will not be able to acquire the lock, since N/2+1 SET NX operations can’t succeed if N/2+1 keys already exist. So if a lock was acquired, it is not possible to re-acquire it at the same time (violating the mutual exclusion property). +During the time that the majority of keys are set, another client will not be able to acquire the lock, since N/2+1 SET NX operations can’t succeed if N/2+1 keys already exist. So if a lock was acquired, it is not possible to re-acquire it at the same time (violating the mutual exclusion property). However we want to also make sure that multiple clients trying to acquire the lock at the same time can’t simultaneously succeed. -If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. 
In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be able to lock N/2+1 instances at the same time (with “time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. +If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be able to lock N/2+1 instances at the same time (with "time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. -Are you able to provide a formal proof of safety, point out to existing algorithms that are similar enough, or to find a bug? That would be very appreciated. +Are you able to provide a formal proof of safety, point to existing algorithms that are similar, or find a bug? That would be greatly appreciated. Liveness arguments --- @@ -149,9 +149,9 @@ The system liveness is based on three main features: 1. The auto release of the lock (since keys expire): eventually keys are available again to be locked. 2. The fact that clients, usually, will cooperate removing the locks when the lock was not acquired, or when the lock was acquired and the work terminated, making it likely that we don’t have to wait for keys to expire to re-acquire the lock. -3. 
The fact that when a client needs to retry a lock, it waits a time which is comparable greater to the time needed to acquire the majority of locks, in order to probabilistically make split brain conditions during resource contention unlikely. +3. The fact that when a client needs to retry a lock, it waits a time which is comparably greater than the time needed to acquire the majority of locks, in order to probabilistically make split brain conditions during resource contention unlikely. -However we pay an availability penalty equal to “TTL” time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. +However, we pay an availability penalty equal to `TTL` time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. This happens every time a client acquires a lock and gets partitioned away before being able to remove the lock. Basically if there are infinite continuous network partitions, the system may become not available for an infinite amount of time. @@ -201,8 +201,8 @@ the lock into the majority of instances, and within the validity time (basically the algorithm to use is very similar to the one used when acquiring the lock). -However this does not technically change the algorithm, so anyway the max number -of locks reacquiring attempts should be limited, otherwise one of the liveness +However this does not technically change the algorithm, so the maximum number +of lock reacquisition attempts should be limited, otherwise one of the liveness properties is violated. Want to help? From d69942e933c5ba1441982476028aafe36dfed905 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 13 Jul 2015 18:22:21 +0200 Subject: [PATCH 0367/2314] Variadic EXISTS documented. 
--- commands/exists.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/commands/exists.md b/commands/exists.md index 1963109ce8..867ebf9d57 100644 --- a/commands/exists.md +++ b/commands/exists.md @@ -1,5 +1,9 @@ Returns if `key` exists. +Since Redis 3.0.3 it is possible to specify multiple keys instead of a single one. In such a case, it returns the total number of keys existing. Note that returning 1 or 0 for a single key is just a special case of the variadic usage, so the command is completely backward compatible. + +The user should be aware that if the same existing key is mentioned in the arguments multiple times, it will be counted multiple times. So if `somekey` exists, `EXISTS somekey somekey` will return 2. + @return @integer-reply, specifically: @@ -7,10 +11,16 @@ Returns if `key` exists. * `1` if the key exists. * `0` if the key does not exist. +Since Redis 3.0.3 the command accepts a variable number of keys and the return value is generalized: + +* The number of keys existing among the ones specified as arguments. Keys mentioned multiple times and existing are counted multiple times. + @examples ```cli SET key1 "Hello" EXISTS key1 -EXISTS key2 +EXISTS nosuchkey +SET key2 "World" +EXISTS key1 key2 nosuchkey ``` From f0916d97b21c30396ed731ab6c25ef2a64e6edfb Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Tue, 14 Jul 2015 10:04:47 -0300 Subject: [PATCH 0368/2314] Fix formatting on BITOP. --- commands/bitop.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/commands/bitop.md b/commands/bitop.md index 1f16dc88f6..794b2a8b66 100644 --- a/commands/bitop.md +++ b/commands/bitop.md @@ -4,15 +4,16 @@ store the result in the destination key. The `BITOP` command supports four bitwise operations: **AND**, **OR**, **XOR** and **NOT**, thus the valid forms to call the command are: -* `BITOP AND _destkey srckey1 srckey2 srckey3 ... srckeyN_` -* `BITOP OR _destkey srckey1 srckey2 srckey3 ... 
srckeyN_` -* `BITOP XOR _destkey srckey1 srckey2 srckey3 ... srckeyN_` -* `BITOP NOT _destkey srckey_` + +* `BITOP AND destkey srckey1 srckey2 srckey3 ... srckeyN` +* `BITOP OR destkey srckey1 srckey2 srckey3 ... srckeyN` +* `BITOP XOR destkey srckey1 srckey2 srckey3 ... srckeyN` +* `BITOP NOT destkey srckey` As you can see **NOT** is special as it only takes an input key, because it performs inversion of bits so it only makes sense as an unary operator. -The result of the operation is always stored at _destkey_. +The result of the operation is always stored at `destkey`. ## Handling of strings with different lengths From dae42cc3581540933aadcf4fd3c02fa8661e3d38 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Tue, 14 Jul 2015 10:09:30 -0300 Subject: [PATCH 0369/2314] Mark keywords. --- commands/cluster-nodes.md | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/commands/cluster-nodes.md b/commands/cluster-nodes.md index d48b5635a2..23ef562de1 100644 --- a/commands/cluster-nodes.md +++ b/commands/cluster-nodes.md @@ -37,26 +37,26 @@ Each line is composed of the following fields: The meaning of each filed is the following: -1. **id** The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). -2. **ip:port** The node address where clients should contact the node to run queries. -3. **flags** A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in detail in the next section. -4. **master** If the node is a slave, and the master is known, the master node ID, otherwise the "-" character. -5. **ping-sent** Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. -6. **pong-recv** Milliseconds unix time the last pong was received. -7. 
**config-epoch** The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoch wins. -8. **link-state** The state of the link used for the node-to-node cluster bus. We use this link to communicate with the node. Can be `connected` or `disconnected`. -9. **slot** An hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. +1. `id`: The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). +2. `ip:port`: The node address where clients should contact the node to run queries. +3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in detail in the next section. +4. `master`: If the node is a slave, and the master is known, the master node ID, otherwise the "-" character. +5. `ping-sent`: Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. +6. `pong-recv`: Milliseconds unix time the last pong was received. +7. `config-epoch`: The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoch wins. +8. 
`link-state`: The state of the link used for the node-to-node cluster bus. We use this link to communicate with the node. Can be `connected` or `disconnected`. +9. `slot`: An hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. Meaning of the flags (field number 3): -* **myself** The node you are contacting. -* **master** Node is a master. -* **slave** Node is a slave. -* **fail?** Node is in `PFAIL` state. Not reachable for the node you are contacting, but still logically reachable (not in `FAIL` state). -* **fail** Node is in `FAIL` state. It was not reachable for multiple nodes that promoted the `PFAIL` state to `FAIL`. -* **handshake** Untrusted node, we are handshaking. -* **noaddr** No address known for this node. -* **noflags** No flags at all. +* `myself`: The node you are contacting. +* `master`: Node is a master. +* `slave`: Node is a slave. +* `fail?`: Node is in `PFAIL` state. Not reachable for the node you are contacting, but still logically reachable (not in `FAIL` state). +* `fail`: Node is in `FAIL` state. It was not reachable for multiple nodes that promoted the `PFAIL` state to `FAIL`. +* `handshake`: Untrusted node, we are handshaking. +* `noaddr`: No address known for this node. +* `noflags`: No flags at all. ## Notes on published config epochs @@ -99,7 +99,7 @@ Note that the format does not have any space, so `CLUSTER NODES` output format i Note that: -1. Migration and importing slots are only added to the node flagged as **myself**. This information is local to a node, for its own slots. +1. Migration and importing slots are only added to the node flagged as `myself`. 
This information is local to a node, for its own slots. 2. Importing and migrating slots are provided as **additional info**. If the node has a given hash slot assigned, it will be also a plain number in the list of hash slots, so clients that don't have a clue about hash slots migrations can just skip this special fields. @return From 54ac547ef1c228be71293b633beb465b2d387369 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Tue, 14 Jul 2015 10:10:37 -0300 Subject: [PATCH 0370/2314] Mark keywords. --- commands/config-set.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/config-set.md b/commands/config-set.md index 42492ae4d3..8fd1e98d08 100644 --- a/commands/config-set.md +++ b/commands/config-set.md @@ -17,7 +17,7 @@ following important differences: [hgcarr22rc]: http://github.com/antirez/redis/raw/2.8/redis.conf * In options where bytes or other quantities are specified, it is not - possible to use the `redis.conf` abbreviated form (10k 2gb ... and so forth), + possible to use the `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth), everything should be specified as a well-formed 64-bit integer, in the base unit of the configuration directive. However since Redis version 3.0 or greater, it is possible to use `CONFIG SET` with memory units for From bd50a7d89680cf782ec9d2bccab83987f186af91 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Tue, 14 Jul 2015 10:16:40 -0300 Subject: [PATCH 0371/2314] Spell checking and formatting on EVAL. --- commands/eval.md | 12 ++++++++---- wordlist | 6 ++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 28a84d1ff3..056a47a0d8 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -291,27 +291,31 @@ Redis offers a SCRIPT command that can be used in order to control the scripting subsystem. SCRIPT currently accepts three different commands: -* SCRIPT FLUSH. 
+* `SCRIPT FLUSH` + This command is the only way to force Redis to flush the scripts cache. It is most useful in a cloud environment where the same instance can be reassigned to a different user. It is also useful for testing client libraries' implementations of the scripting feature. -* SCRIPT EXISTS _sha1_ _sha2_... _shaN_. +* `SCRIPT EXISTS sha1 sha2 ... shaN` + Given a list of SHA1 digests as arguments this command returns an array of 1 or 0, where 1 means the specific SHA1 is recognized as a script already present in the scripting cache, while 0 means that a script with this SHA1 was never seen before (or at least never seen after the latest SCRIPT FLUSH command). -* SCRIPT LOAD _script_. +* `SCRIPT LOAD script` + This command registers the specified script in the Redis script cache. The command is useful in all the contexts where we want to make sure that `EVALSHA` will not fail (for instance during a pipeline or MULTI/EXEC operation), without the need to actually execute the script. -* SCRIPT KILL. +* `SCRIPT KILL` + This command is the only way to interrupt a long-running script that reaches the configured maximum execution time for scripts. 
The SCRIPT KILL command can only be used with scripts that did not modify diff --git a/wordlist b/wordlist index 8857778f3b..71a49971c6 100644 --- a/wordlist +++ b/wordlist @@ -2,8 +2,10 @@ ACLs AMD AOF API +BitOp CAS CJSON +CJSON CLI CP CPUs @@ -60,6 +62,7 @@ MX MacBook Maxmemory Memcached +MessagePack MongoDB MySQL NAS @@ -165,6 +168,7 @@ changelogs checksum chrt cli +cmsgpack commandstats conf config @@ -267,6 +271,7 @@ myzset netcat netsplits newjobs +nils noeviction noscript numactl @@ -325,6 +330,7 @@ smaps snapshotting startup strace +struct subcommand subcommands suboptimal From 58197baaf79d45f5c452882d219bf6dac1c9d311 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 7 May 2015 13:08:36 -0400 Subject: [PATCH 0372/2314] Update radix go client information --- clients.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index 0124f512f2..950c6abf97 100644 --- a/clients.json +++ b/clients.json @@ -102,9 +102,9 @@ { "name": "Radix", "language": "Go", - "repository": "https://github.com/fzzy/radix", - "description": "MIT licensed Redis client.", - "authors": ["fzzbt"], + "repository": "https://github.com/mediocregopher/radix.v2", + "description": "MIT licensed Redis client which supports pipelining, pooling, redis cluster, scripting, pub/sub, scanning, and more.", + "authors": ["fzzbt", "mediocregopher"], "recommended": true, "active": true }, From ea37b5b8771c01c7f6974733617638975b0ad919 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 14 Jul 2015 21:48:55 +0200 Subject: [PATCH 0373/2314] Update Twitter handle --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 950c6abf97..a9c3df20a0 100644 --- a/clients.json +++ b/clients.json @@ -104,7 +104,7 @@ "language": "Go", "repository": "https://github.com/mediocregopher/radix.v2", "description": "MIT licensed Redis client which supports 
pipelining, pooling, redis cluster, scripting, pub/sub, scanning, and more.", - "authors": ["fzzbt", "mediocregopher"], + "authors": ["fzzbt", "mediocre_gopher"], "recommended": true, "active": true }, From dd17a2518d218c56eaaaae6b4f41cb7e7fc6609f Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 15 Jul 2015 09:34:20 +0200 Subject: [PATCH 0374/2314] Sponsors page updated. --- topics/sponsors.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/topics/sponsors.md b/topics/sponsors.md index 14f9fcdf73..07db25e291 100644 --- a/topics/sponsors.md +++ b/topics/sponsors.md @@ -1,12 +1,14 @@ Redis Sponsors === -Starting from May 2013, all the work [Salvatore Sanfilippo](http://twitter.com/antirez) is doing in order to develop Redis is sponsored by [Pivotal](http://gopivotal.com). The Redis project no longer accepts money donations. +Starting from June 2015 the work [Salvatore Sanfilippo](http://twitter.com/antirez) is doing in order to develop Redis is sponsored by [Redis Labs](https://redislabs.com). -Before May 2013 the project was sponsored by VMware with the work of [Salvatore Sanfilippo](http://twitter.com/antirez) and [Pieter Noordhuis](http://twitter.com/pnoordhuis). - -In the past Redis accepted donations from the following companies: +Past sponsorships: +* The [Shuttleworth Foundation](http://www.shuttleworthfoundation.org) donated 5000 USD to the Redis project in form of a flash grant. The details will be posted soon on a blog post documenting how the money was used. +![Shuttleworth Foundation](http://redis.io/images/shuttleworth.png) +* From May 2013 to June 2015 the work [Salvatore Sanfilippo](http://twitter.com/antirez) did in order to develop Redis was sponsored by [Pivotal](http://gopivotal.com). +* Before May 2013 the project was sponsored by VMware with the work of [Salvatore Sanfilippo](http://twitter.com/antirez) and [Pieter Noordhuis](http://twitter.com/pnoordhuis). 
* [VMware](http://vmware.com) and later [Pivotal](http://pivotal.io) provided a 24 GB RAM workstation for me to run the [Redis CI test](http://ci.redis.io) and other long running tests. Later I (Salvatore) equipped the server with an SSD drive in order to test in the same hardware with rotating and flash drives. * [Linode](http://linode.com) 15 January 2010, provided Virtual Machines for Redis testing in a virtualized environment. * [Slicehost](http://slicehost.com) 14 January 2010, provided Virtual Machines for Redis testing in a virtualized environment. @@ -20,7 +22,7 @@ Also thanks to the following people or organizations that donated to the Project * [Brad Jasper](http://bradjasper.com/) * [Mrkris](http://www.mrkris.com/) -We are grateful to [Pivotal](http://gopivotal.com), [VMware](http://vmware.com) and to the other companies and people that donated to the Redis project. Thank you. +We are grateful to [Redis Labs](http://redislabs.com), [Pivotal](http://gopivotal.com), [VMware](http://vmware.com) and to the other companies and people that donated to the Redis project. Thank you. ## redis.io @@ -33,5 +35,5 @@ They also sponsored the initial implementation of this site by Martens](https://twitter.com/soveran). Damian and Michel remain the current maintainers. -The `redis.io` domain is kindly donated to the project by [I Want My -Name](https://iwantmyname.com). +The `redis.io` domain was donated for a few years to the project by [I Want My +Name](https://iwantmyname.com). Now is sponsored by myself (Salvatore Sanfilippo). 
From e7a07d5adbeaefbac0b9f1411b0914560260550a Mon Sep 17 00:00:00 2001 From: Edward Brey Date: Wed, 15 Jul 2015 14:12:04 -0500 Subject: [PATCH 0375/2314] grammar fixes --- topics/faq.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/topics/faq.md b/topics/faq.md index 2ec910b181..7f9c42a29a 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -24,10 +24,10 @@ To give you a few examples (all obtained using 64-bit instances): To test your use case is trivial using the `redis-benchmark` utility to generate random data sets and check with the `INFO memory` command the space used. -64 bit systems will use considerably more memory than 32 bit systems to store the same keys, especially if the keys and values are small, this is because pointers takes 8 bytes in 64 bit systems. But of course the advantage is that you can -have a lot of memory in 64 bit systems, so in order to run large Redis servers a 64 bit system is more or less required. The alternative is sharding. +64-bit systems will use considerably more memory than 32-bit systems to store the same keys, especially if the keys and values are small. This is because pointers takes 8 bytes in 64-bit systems. But of course the advantage is that you can +have a lot of memory in 64-bit systems, so in order to run large Redis servers a 64 bit system is more or less required. The alternative is sharding. -## I like Redis high level operations and features, but I don't like that it takes everything in memory and I can't have a dataset larger the memory. Plans to change this? +## I like Redis's high level operations and features, but I don't like that it takes everything in memory and I can't have a dataset larger the memory. Plans to change this? In the past the Redis developers experimented with Virtual Memory and other systems in order to allow larger than RAM datasets, but after all we are very happy if we can do one thing well: data served from memory, disk used for storage. 
So for now there are no plans to create an on disk backend for Redis. Most of what Redis is, after all, is a direct result of its current design. @@ -55,7 +55,7 @@ way. There is more info in the [Memory Optimization page](/topics/memory-optimiz Redis will either be killed by the Linux kernel OOM killer, crash with an error, or will start to slow down. With modern operating systems malloc() returning NULL is not common, usually -the server will start swapping and Redis performances will degrade so +the server will start swapping, and Redis performance will degrade, so you'll probably notice there is something wrong. The INFO command will report the amount of memory Redis is using so you can @@ -109,17 +109,17 @@ Yes, redis background saving process is always forked when the server is outside of the execution of a command, so every command reported to be atomic in RAM is also atomic from the point of view of the disk snapshot. -## Redis is single threaded, how can I exploit multiple CPU / cores? +## Redis is single threaded. How can I exploit multiple CPU / cores? -It's very unlikely that CPU becomes your bottleneck with Redis, as usually Redis is either memory or network bound. For instance using pipelining Redis running +It's very unlikely that CPU becomes your bottleneck with Redis, as usually Redis is either memory or network bound. For instance, using pipelining Redis running on an average Linux system can deliver even 500k requests per second, so -if your application mainly uses O(N) or O(log(N)) commands it is hardly +if your application mainly uses O(N) or O(log(N)) commands, it is hardly going to use too much CPU. -However to maximize CPU usage you can start multiple instances of Redis in +However, to maximize CPU usage you can start multiple instances of Redis in the same box and treat them as different servers. At some point a single box may not be enough anyway, so if you want to use multiple CPUs you can -start thinking at some way to shard earlier. 
+start thinking of some way to shard earlier. You can find more information about using multiple Redis instances in the [Partitioning page](/topics/partitioning). @@ -147,7 +147,7 @@ As a result of this, it is common for users with many keys with an expire set to It means REmote DIctionary Server. -## Why did you started the Redis project? +## Why did you start the Redis project? Originally Redis was started in order to scale [LLOOGG][lloogg]. But after I got the basic server working I liked the idea to share the work with other people, and Redis was turned into an open source project. From 31706b5d9c76c722287835a45eb18220416f355f Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 15 Jul 2015 22:55:25 +0200 Subject: [PATCH 0376/2314] Small fixes in the FAQ --- topics/faq.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/faq.md b/topics/faq.md index 7f9c42a29a..2ac0d9bf97 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -24,8 +24,8 @@ To give you a few examples (all obtained using 64-bit instances): To test your use case is trivial using the `redis-benchmark` utility to generate random data sets and check with the `INFO memory` command the space used. -64-bit systems will use considerably more memory than 32-bit systems to store the same keys, especially if the keys and values are small. This is because pointers takes 8 bytes in 64-bit systems. But of course the advantage is that you can -have a lot of memory in 64-bit systems, so in order to run large Redis servers a 64 bit system is more or less required. The alternative is sharding. +64-bit systems will use considerably more memory than 32-bit systems to store the same keys, especially if the keys and values are small. This is because pointers take 8 bytes in 64-bit systems. But of course the advantage is that you can +have a lot of memory in 64-bit systems, so in order to run large Redis servers a 64-bit system is more or less required. The alternative is sharding. 
## I like Redis's high level operations and features, but I don't like that it takes everything in memory and I can't have a dataset larger the memory. Plans to change this? From 9d688c980a354b5a707dc6c4089415ea285bb116 Mon Sep 17 00:00:00 2001 From: Eugene Fidelin Date: Fri, 17 Jul 2015 13:11:23 +0200 Subject: [PATCH 0377/2314] Add phpRedExpert tool to the docs --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index 65f9371821..28f653110a 100644 --- a/tools.json +++ b/tools.json @@ -344,5 +344,13 @@ "repository": "https://github.com/adriano-di-giovanni/node-redis-keychain", "description": "A Node.js library for streamlining the configuration and maintenance of your Redis namespace", "authors": ["codecreativity"] + }, + { + "name": "phpRedExpert", + "language": "PHP", + "url": "https://github.com/eugef/phpRedExpert", + "repository": "https://github.com/eugef/phpRedExpert", + "description": "phpRedExpert ia simple and powerful web UI for Redis databases and servers management, written in PHP and JavaScript.", + "authors": ["EugeneFidelin"] } ] From 78dedb18b7aadb601d4dfcde15503455cf0f4bc1 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 17 Jul 2015 15:26:51 +0200 Subject: [PATCH 0378/2314] EXISTS is now variadic. 
--- commands.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commands.json b/commands.json index eeedc2c259..a48361db85 100644 --- a/commands.json +++ b/commands.json @@ -664,7 +664,8 @@ "arguments": [ { "name": "key", - "type": "key" + "type": "key", + "multiple": true } ], "since": "1.0.0", From 27297e399f80d0ca9057c9c029145e8a2d2de254 Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Sun, 19 Jul 2015 13:45:28 -0400 Subject: [PATCH 0379/2314] Copy edits for spelling typos and doubled words --- commands/cluster nodes.md | 2 +- commands/cluster set-config-epoch.md | 2 +- commands/geoadd.md | 2 +- commands/geoencode.md | 2 +- topics/cluster-spec.md | 2 +- topics/protocol.md | 2 +- topics/sentinel.md | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/commands/cluster nodes.md b/commands/cluster nodes.md index 9d318bea25..4b1de46808 100644 --- a/commands/cluster nodes.md +++ b/commands/cluster nodes.md @@ -77,7 +77,7 @@ However node hash slots can be in a special state, used in order to communicate The meaning of the two states is explained in the Redis Specification, however the gist of the two states is the following: * **Importing** slots are yet not part of the nodes hash slot, there is a migration in progress. The node will accept queries about these slots only if the `ASK` command is used. -* **Migrating** slots are assigned to the node, but but are being migrated to some other node. The node will accept queries if all the keys in the command exist already, otherwise it will emit what is called an **ASK redirection**, to force new keys creation directly in the importing node. +* **Migrating** slots are assigned to the node, but are being migrated to some other node. The node will accept queries if all the keys in the command exist already, otherwise it will emit what is called an **ASK redirection**, to force new keys creation directly in the importing node. 
Importing and migrating slots are emitted in the `CLUSTER NODES` output as follows: diff --git a/commands/cluster set-config-epoch.md b/commands/cluster set-config-epoch.md index 4c12458457..9c97d39087 100644 --- a/commands/cluster set-config-epoch.md +++ b/commands/cluster set-config-epoch.md @@ -12,7 +12,7 @@ However there is an exception to this rule, and it is when a new cluster is created from scratch. Redis Cluster *config epoch collision resolution* algorithm can deal with new nodes all configured with the same configuration at startup, but this process is slow and should be -the exception, only to make sure that whatever happens, two more more +the exception, only to make sure that whatever happens, two more nodes eventually always move away from the state of having the same configuration epoch. diff --git a/commands/geoadd.md b/commands/geoadd.md index 0665596bfc..0e9ed913b9 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -7,7 +7,7 @@ can be indexed: areas very near to the poles are not indexable. The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: * Valid longitudes are from -180 to 180 degrees. -* Valid latitudes are from -85.05112878 to 85.05112878 degreees. +* Valid latitudes are from -85.05112878 to 85.05112878 degrees. The command will report an error when the user attempts to index coordinates outside the specified ranges. diff --git a/commands/geoencode.md b/commands/geoencode.md index 3334e32121..b5081bc9b6 100644 --- a/commands/geoencode.md +++ b/commands/geoencode.md @@ -1,4 +1,4 @@ -Geospatial Redis commands encode positions of objects in a single 52 bit integer, using a technique called geohash. The encoding is further explained in the `GEODECODE` and `GEOADD` documentation. 
The `GEOENCODE` command, documented in this page, is able to convert a longitude and latitude pair into such 52 bit integer, which is used as as the *score* for the sorted set members representing geopositional information. +Geospatial Redis commands encode positions of objects in a single 52 bit integer, using a technique called geohash. The encoding is further explained in the `GEODECODE` and `GEOADD` documentation. The `GEOENCODE` command, documented in this page, is able to convert a longitude and latitude pair into such 52 bit integer, which is used as the *score* for the sorted set members representing geopositional information. Normally you don't need to use this command, unless you plan to implement low level code in the client side interacting with the Redis geo commands. This command may also be useful for debugging purposes. diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 71ab99cf66..6b06e5d03a 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -851,7 +851,7 @@ elected, the others will try shortly. Once a slave wins the election, it obtains a new unique and incremental `configEpoch` which is higher than that of any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. -In order to speedup the reconfiguration of other nodes, a pong packet is broadcasted to all the nodes of the cluster. Currently unreachable nodes will eventually be reconfigured when they receive a ping or pong packet from another node or will receive an `UPDATE` packet from another node if the information it publishes via heartbeat packets are detected to be out of date. +In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster. 
Currently unreachable nodes will eventually be reconfigured when they receive a ping or pong packet from another node or will receive an `UPDATE` packet from another node if the information it publishes via heartbeat packets are detected to be out of date. The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade their configuration. Slaves of the old master (or the failed over master if it rejoins the cluster) will not just upgrade the configuration but will also reconfigure to replicate from the new master. How nodes rejoining the cluster are configured is explained in the next sections. diff --git a/topics/protocol.md b/topics/protocol.md index d26dba2ec0..40f268acdd 100644 --- a/topics/protocol.md +++ b/topics/protocol.md @@ -354,7 +354,7 @@ never need to scan the payload for special characters like it happens for instance with JSON, nor to quote the payload that needs to be sent to the server. -The Bulk and Multi Bulk lengths can be be processed with code that performs +The Bulk and Multi Bulk lengths can be processed with code that performs a single operation per character while at the same time scanning for the CR character, like the following C code: diff --git a/topics/sentinel.md b/topics/sentinel.md index 2cee8d3228..044b1fc0d7 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -748,7 +748,7 @@ master, and another slave S2 in another data center, it is possible to set S1 with a priority of 10 and S2 with a priority of 100, so that if the master fails and both S1 and S2 are available, S1 will be preferred. -For more information about the the way slaves are selected, please check the **slave selection and priority** section of this documentation. +For more information about the way slaves are selected, please check the **slave selection and priority** section of this documentation. 
Sentinel and Redis authentication --- @@ -838,7 +838,7 @@ The ODOWN condition **only applies to masters**. For other kind of instances Sentinel doesn't require to act, so the ODOWN state is never reached for slaves and other sentinels, but only SDOWN is. -However SDOWN has also semantical implications. For example a slave in SDOWN +However SDOWN has also semantic implications. For example a slave in SDOWN state is not selected to be promoted by a Sentinel performing a failover. Sentinels and Slaves auto discovery From b0159c12fc60b842daaec511cb2fece1914a523a Mon Sep 17 00:00:00 2001 From: James Cauwelier Date: Sun, 26 Jul 2015 18:33:54 +0200 Subject: [PATCH 0380/2314] Move package to new vendor namespace --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index a9c3df20a0..c0869e9c0f 100644 --- a/clients.json +++ b/clients.json @@ -436,7 +436,7 @@ { "name": "PHP Sentinel Client", "language": "PHP", - "repository": "https://github.com/Sparkcentral/PSRedis", + "repository": "https://github.com/jamescauwelier/PSRedis", "description": "A PHP sentinel client acting as an extension to your regular redis client", "authors": ["jamescauwelier"], "active": true From 9167ff90964a9327121072fecbc666b1026a61c3 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sun, 26 Jul 2015 19:43:16 +0200 Subject: [PATCH 0381/2314] Fix gist link --- topics/pubsub.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/pubsub.md b/topics/pubsub.md index 27c0f6cfe2..f4294dd832 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -167,7 +167,7 @@ unsubscribing from all the channels and patterns. Pieter Noordhuis provided a great example using EventMachine and Redis to create [a multi user high performance web -chat](https://gist.github.com/348262). +chat](https://gist.github.com/pietern/348262). 
## Client library implementation hints From 80f2b287ea1a03c38058ff5ad3980fb6dbb7faf2 Mon Sep 17 00:00:00 2001 From: Johnny Everson Date: Tue, 28 Jul 2015 22:56:29 -0300 Subject: [PATCH 0382/2314] Remove mention of 3.0 as beta --- topics/lru-cache.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/topics/lru-cache.md b/topics/lru-cache.md index 82cb208d81..25b59cc8bb 100644 --- a/topics/lru-cache.md +++ b/topics/lru-cache.md @@ -89,10 +89,9 @@ was accessed the most in the past. Instead it will try to run an approximation of the LRU algorithm, by sampling a small number of keys, and evicting the one that is the best (with the oldest access time) among the sampled keys. -However since Redis 3.0 (that is currently in beta) the algorithm was improved -to also take a pool of good candidates for eviction. This improved the -performance of the algorithm, making it able to approximate more closely the -behavior of a real LRU algorithm. +However since Redis 3.0 the algorithm was improved to also take a pool of good +candidates for eviction. This improved the performance of the algorithm, making +it able to approximate more closely the behavior of a real LRU algorithm. What is important about the Redis LRU algorithm is that you **are able to tune** the precision of the algorithm by changing the number of samples to check for every eviction. This parameter is controlled by the following configuration directive: From 7db28aee5972e6524e25f4c1bb5f10643a06497f Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Wed, 29 Jul 2015 11:32:28 -0300 Subject: [PATCH 0383/2314] Typos, formatting. 
--- commands/eval.md | 2 +- commands/geoadd.md | 2 +- commands/geohash.md | 2 +- commands/georadius.md | 14 +++++++------- commands/info.md | 18 +++++++++--------- commands/pubsub.md | 4 ++-- commands/role.md | 2 ++ commands/setnx.md | 2 +- wordlist | 6 ++++++ 9 files changed, 30 insertions(+), 22 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 056a47a0d8..b5fe028965 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -608,7 +608,7 @@ It supports several other functions: `bit.lshift`, `bit.rshift`, `bit.arshift`, `bit.rol`, `bit.ror`, `bit.bswap`. All available functions are documented in the [Lua BitOp documentation](http://bitop.luajit.org/api.html) -### redis.sha1hex +### `redis.sha1hex` Perform the SHA1 of the input string. diff --git a/commands/geoadd.md b/commands/geoadd.md index 0e9ed913b9..f95a1e4d33 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -31,7 +31,7 @@ What Earth model does it use? It just assumes that the Earth is a sphere, since the used distance formula is the Haversine formula. This formula is only an approximation when applied to the Earth, which is not a perfect sphere. The introduced errors are not an issue when used in the context of social network sites that need to query by radius -and most other applications. However in th worst case the error may be up to +and most other applications. However in the worst case the error may be up to 0.5%, so you may want to consider other systems for error-critical applications. @return diff --git a/commands/geohash.md b/commands/geohash.md index 5098609a10..2517c3f28d 100644 --- a/commands/geohash.md +++ b/commands/geohash.md @@ -4,7 +4,7 @@ Normally Redis represents positions of elements using a variation of the Geohash technique where positions are encoded using 52 bit integers. The encoding is also different compared to the standard because the initial min and max coordinates used during the encoding and decoding process are different. 
This -command however **returns a standd Geohash** in the form of a string as +command however **returns a standard Geohash** in the form of a string as described in the [Wikipedia article](https://en.wikipedia.org/wiki/Geohash) and compatible with the [geohash.org](http://geohash.org) web site. Geohash string properties diff --git a/commands/georadius.md b/commands/georadius.md index 5bb11c7c6f..a7808dd5c2 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -1,6 +1,6 @@ Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). -The common use case for this command is to retrieve geospatial items near ot a specified point and no far than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places. +The common use case for this command is to retrieve geospatial items near a specified point and no far than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places. The radius is specified in one of the following units: @@ -11,14 +11,14 @@ The radius is specified in one of the following units: The command optionally returns additional information using the following options: -* **WITHDIST**: Also return the distance of the returned items from the specified center. The distance is returned in the same unit as the unit specified as the radius argument of the command. -* **WITHCOORD**: Also return the longitude,latitude coordinates of the matching items. -* **WITHASH**: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. 
+* `WITHDIST`: Also return the distance of the returned items from the specified center. The distance is returned in the same unit as the unit specified as the radius argument of the command. +* `WITHCOORD`: Also return the longitude,latitude coordinates of the matching items. +* `WITHASH`: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. The command default is to return unsorted items. Two different sorting methods can be invoked using the following two options: -* **ASC**: Sort returned items from the nearest to the fairest, relative to the center. -* **DESC**: Sort returned items from the fairest to the nearest, relative to the center. +* `ASC`: Sort returned items from the nearest to the fairest, relative to the center. +* `DESC`: Sort returned items from the fairest to the nearest, relative to the center. By default all the matching items are returned. It is possible to limit the results to the first N matching items by using the **COUNT ``** option. However note that internally the command needs to perform an effort proportional to the number of items matching the specified area, so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. On the other hand `COUNT` can be a very effective way to reduce bandwidth usage if normally just the first results are used. @@ -35,7 +35,7 @@ When additional information is returned as an array of arrays for each item, the 2. The geohash integer. 3. The coordinates as a two items x,y array (longitude,latitude). 
-So for example the command `GEORADIUS Sicily 15 37 200 km withcoord withdist` will return each item in the following way: +So for example the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` will return each item in the following way: ["Palermo","190.4424",["13.361389338970184","38.115556395496299"]] diff --git a/commands/info.md b/commands/info.md index 062e28bf52..d30565ac9c 100644 --- a/commands/info.md +++ b/commands/info.md @@ -73,12 +73,12 @@ Here is the meaning of all fields in the **memory** section: * `used_memory_human`: Human readable representation of previous value * `used_memory_rss`: Number of bytes that Redis allocated as seen by the operating system (a.k.a resident set size). This is the number reported by tools - such as **top** and **ps**. + such as `top(1)` and `ps(1)` * `used_memory_peak`: Peak memory consumed by Redis (in bytes) * `used_memory_peak_human`: Human readable representation of previous value * `used_memory_lua`: Number of bytes used by the Lua engine * `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory` -* `mem_allocator`: Memory allocator, chosen at compile time. +* `mem_allocator`: Memory allocator, chosen at compile time Ideally, the `used_memory_rss` value should be only slightly higher than `used_memory`. 
When rss >> used, a large difference means there is memory fragmentation @@ -142,9 +142,9 @@ Here is the meaning of all fields in the **stats** section: * `total_connections_received`: Total number of connections accepted by the server * `total_commands_processed`: Total number of commands processed by the server * `instantaneous_ops_per_sec`: Number of commands processed per second -* `rejected_connections`: Number of connections rejected because of maxclients limit +* `rejected_connections`: Number of connections rejected because of `maxclients` limit * `expired_keys`: Total number of key expiration events -* `evicted_keys`: Number of evicted keys due to maxmemory limit +* `evicted_keys`: Number of evicted keys due to `maxmemory` limit * `keyspace_hits`: Number of successful lookup of keys in the main dictionary * `keyspace_misses`: Number of failed lookup of keys in the main dictionary * `pubsub_channels`: Global number of pub/sub channels with client subscriptions @@ -162,11 +162,11 @@ If the instance is a slave, these additional fields are provided: * `master_port`: Master listening TCP port * `master_link_status`: Status of the link (up/down) * `master_last_io_seconds_ago`: Number of seconds since the last interaction with master -* `master_sync_in_progress`: Indicate the master is SYNCing to the slave +* `master_sync_in_progress`: Indicate the master is syncing to the slave If a SYNC operation is on-going, these additional fields are provided: -* `master_sync_left_bytes`: Number of bytes left before SYNCing is complete +* `master_sync_left_bytes`: Number of bytes left before syncing is complete * `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O during a SYNC operation If the link between master and slave is down, an additional field is provided: @@ -179,7 +179,7 @@ The following field is always provided: For each slave, the following line is added: -* `slaveXXX`: id, ip address, port, state +* `slaveXXX`: id, IP address, port, 
state Here is the meaning of all fields in the **cpu** section: @@ -194,7 +194,7 @@ and the average CPU consumed per command execution. For each command type, the following line is added: -* `cmdstat_XXX`:calls=XXX,usec=XXX,usec_per_call=XXX +* `cmdstat_XXX`: `calls=XXX,usec=XXX,usec_per_call=XXX` The **cluster** section currently only contains a unique field: @@ -205,6 +205,6 @@ The statistics are the number of keys, and the number of keys with an expiration For each database, the following line is added: -* `dbXXX`:keys=XXX,expires=XXX +* `dbXXX`: `keys=XXX,expires=XXX` [hcgcpgp]: http://code.google.com/p/google-perftools/ diff --git a/commands/pubsub.md b/commands/pubsub.md index f766b00b0d..96f3ce0448 100644 --- a/commands/pubsub.md +++ b/commands/pubsub.md @@ -17,7 +17,7 @@ listed. @array-reply: a list of active channels, optionally matching the specified pattern. -# PUBSUB NUMSUB [channel-1 ... channel-N] +# `PUBSUB NUMSUB [channel-1 ... channel-N]` Returns the number of subscribers (not counting clients subscribed to patterns) for the specified channels. @@ -31,7 +31,7 @@ channels specified in the command call. Note that it is valid to call this command without channels. In this case it will just return an empty list. -# PUBSUB NUMPAT +# `PUBSUB NUMPAT` Returns the number of subscriptions to patterns (that are performed using the `PSUBSCRIBE` command). 
Note that this is not just the count of clients subscribed diff --git a/commands/role.md b/commands/role.md index ce77d09f0e..6d261328b5 100644 --- a/commands/role.md +++ b/commands/role.md @@ -56,11 +56,13 @@ The slave output is composed of the following parts: An example of Sentinel output: +``` 1) "sentinel" 2) 1) "resque-master" 2) "html-fragments-master" 3) "stats-master" 4) "metadata-master" +``` The sentinel output is composed of the following parts: diff --git a/commands/setnx.md b/commands/setnx.md index 8aaad80c4b..94d0b517b9 100644 --- a/commands/setnx.md +++ b/commands/setnx.md @@ -1,7 +1,7 @@ Set `key` to hold string `value` if `key` does not exist. In that case, it is equal to `SET`. When `key` already holds a value, no operation is performed. -`SETNX` is short for "**SET** if **N** ot e **X** ists". +`SETNX` is short for "**SET** if **N**ot e**X**ists". @return diff --git a/wordlist b/wordlist index 71a49971c6..d9f683af00 100644 --- a/wordlist +++ b/wordlist @@ -229,6 +229,8 @@ infeasible init inline internet +intset +intsets iojob iostat ists @@ -279,6 +281,7 @@ online overcommit pades pageview +parsable pcall pid pipelined @@ -315,6 +318,7 @@ resharding reshardings resync resynchronization +resynchronizations resynchronize robj roundtrips @@ -378,3 +382,5 @@ virtualized vmstat vtype wildcards +ziplist +ziplists From a25e58676b8d003d15f5ef5d53eb1752ed26eafe Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Wed, 29 Jul 2015 11:33:59 -0300 Subject: [PATCH 0384/2314] Don't spell-check topics for now. 
--- makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefile b/makefile index 1c8ffcee4d..9c59c473fd 100644 --- a/makefile +++ b/makefile @@ -1,4 +1,4 @@ -MD_FILES:=$(shell find commands topics -name '*.md') +MD_FILES:=$(shell find commands -name '*.md') TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) From ee5b189828bdf2981ee578534d1b3f4ff69dd532 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 29 Jul 2015 16:53:15 +0200 Subject: [PATCH 0385/2314] CONFIG REWRITE now is a real thing. --- topics/config.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/topics/config.md b/topics/config.md index b6fff8c83c..4ebcf7b3e5 100644 --- a/topics/config.md +++ b/topics/config.md @@ -66,11 +66,7 @@ redis.conf file** so at the next restart of Redis the old configuration will be used instead. Make sure to also modify the `redis.conf` file accordingly to the configuration -you set using [CONFIG SET](/commands/config-set). -There are plans to provide a `CONFIG REWRITE` -command that will be able to run the `redis.conf` file rewriting the -configuration accordingly to the current server configuration, without modifying -the comments and the structure of the current file. +you set using [CONFIG SET](/commands/config-set). You can do it manually, or starting with Redis 2.8, you can just use [CONFIG REWRITE](/commands/config-rewrite), which will automatically scan your `redis.conf` file and update the fields which don't match the current configuration value. Fields non existing but set to the default value are not added. Comments inside your configuration file are retained. 
Configuring Redis as a cache --- From 380ad33a0614b4202491f39dfa58e3b554f19343 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 30 Jul 2015 01:00:57 +0200 Subject: [PATCH 0386/2314] Document different return type with INCR option --- commands/zadd.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/commands/zadd.md b/commands/zadd.md index b145dfbe6f..064673a6d9 100644 --- a/commands/zadd.md +++ b/commands/zadd.md @@ -61,6 +61,10 @@ If the user inserts all the elements in a sorted set with the same score (for ex * The number of elements added to the sorted sets, not including elements already existing for which the score was updated. +If the `INCR` option is specified, the return value will be @bulk-string-reply: + +* the new score of `member` (a double precision floating point number), represented as string. + @history * `>= 2.4`: Accepts multiple elements. From 14b3bbb79790e9ed68bf9816ee0b3a22fd2fd505 Mon Sep 17 00:00:00 2001 From: Markus Malkusch Date: Sat, 1 Aug 2015 13:16:32 +0200 Subject: [PATCH 0387/2314] reference to PHPRedisMutex --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index 1a6cfca864..f159684ce5 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -26,6 +26,7 @@ already available that can be used for reference. * [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution and perhaps more. * [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). +* [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation) * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). 
* [Redisson](https://github.com/mrniko/redisson) (Java implementation). * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). From a0967eba14940dc41a23aec8ce1c3e67c36d4e85 Mon Sep 17 00:00:00 2001 From: Sebastian Waisbrot Date: Sat, 1 Aug 2015 19:02:39 -0700 Subject: [PATCH 0388/2314] Add note about multi/exec in brpoplpush --- commands/brpoplpush.md | 1 + 1 file changed, 1 insertion(+) diff --git a/commands/brpoplpush.md b/commands/brpoplpush.md index 4f72434bce..9a6fe376d9 100644 --- a/commands/brpoplpush.md +++ b/commands/brpoplpush.md @@ -1,5 +1,6 @@ `BRPOPLPUSH` is the blocking variant of `RPOPLPUSH`. When `source` contains elements, this command behaves exactly like `RPOPLPUSH`. +When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `RPOPLPUSH`. When `source` is empty, Redis will block the connection until another client pushes to it or until `timeout` is reached. A `timeout` of zero can be used to block indefinitely. From 175b38803e5ae84d6f03fbe4825948e20210db9e Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 3 Aug 2015 09:39:01 +0200 Subject: [PATCH 0389/2314] Add aspell link --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 550d5f76c7..cdde215a8f 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ $ make This will make sure that JSON and Markdown files compile and that all text files have no typos. -You need to install a few Ruby gems and [Aspell][han] to run these checks. +You need to install a few Ruby gems and [Aspell][aspell] to run these checks. The gems are listed in the `.gems` file. Install them with the following command: @@ -121,3 +121,5 @@ $ gem install $(sed -e 's/ -v /:/' .gems) ``` The spell checking exceptions should be added to `./wordlist`. 
+ +[aspell]: http://aspell.net/ From baca9a8f1cd967258b8cb994fa3b452ccf2b8de2 Mon Sep 17 00:00:00 2001 From: Sebastian Waisbrot Date: Sun, 2 Aug 2015 17:40:41 -0700 Subject: [PATCH 0390/2314] Ping argument and behavior when subscribed --- commands/ping.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/commands/ping.md b/commands/ping.md index 5864933236..044895c1b2 100644 --- a/commands/ping.md +++ b/commands/ping.md @@ -1,7 +1,13 @@ -Returns `PONG`. +Returns `PONG` if no argument is provided, otherwise return a copy of the +argument as a bulk. This command is often used to test if a connection is still alive, or to measure latency. +If the client is subscribed to a channel or a pattern, it will instead return a +multi-bulk with a "pong" in the first position and an empty bulk in the second +position, unless an argument is provided in which case it returns a copy +of the argument. + @return @simple-string-reply @@ -10,4 +16,6 @@ latency. ```cli PING + +PING "hello world" ``` From 136fb8acfe41257cecc34c8238496c0399d06142 Mon Sep 17 00:00:00 2001 From: Alexey Popravka Date: Fri, 5 Jun 2015 10:09:55 +0300 Subject: [PATCH 0391/2314] Added aioredis python client to clients.json --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index c0869e9c0f..e2792223cb 100644 --- a/clients.json +++ b/clients.json @@ -504,6 +504,16 @@ "active": true }, + { + "name": "aioredis", + "language": "Python", + "repository": "https://github.com/aio-libs/aioredis", + "url": "http://aioredis.readthedocs.org/", + "description": "Asyncio (PEP 3156) Redis client", + "authors": [], + "active": true + }, + { "name": "prot-redis", "language": "Rebol", From d8a3ec7b2ffd16269bd88a54261e33d276f91f68 Mon Sep 17 00:00:00 2001 From: swordflychen Date: Tue, 4 Aug 2015 10:51:55 +0800 Subject: [PATCH 0392/2314] fix lru-cache.md: allkeys-lru is not suitable for both caching and persist keys. 
--- topics/lru-cache.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/lru-cache.md b/topics/lru-cache.md index 25b59cc8bb..6a9e487a9d 100644 --- a/topics/lru-cache.md +++ b/topics/lru-cache.md @@ -63,7 +63,7 @@ In general as a rule of thumb: * Use the **allkeys-random** if you have a cyclic access where all the keys are scanned continuously, or when you expect the distribution to be uniform (all elements likely accessed with the same probability). * Use the **volatile-ttl** if you want to be able to provide hints to Redis about what are good candidate for expiration by using different TTL values when you create your cache objects. -The **allkeys-lru** and **volatile-random** policies are mainly useful when you want to use a single instance for both caching and to have a set of persistent keys. However it is usually a better idea to run two Redis instances to solve such a problem. +The **volatile-lru** and **volatile-random** policies are mainly useful when you want to use a single instance for both caching and to have a set of persistent keys. However it is usually a better idea to run two Redis instances to solve such a problem. It is also worth to note that setting an expire to a key costs memory, so using a policy like **allkeys-lru** is more memory efficient since there is no need to set an expire for the key to be evicted under memory pressure. From edc680cdcedb21a93af1548bce1761cc4facbfba Mon Sep 17 00:00:00 2001 From: huangz1990 Date: Wed, 5 Aug 2015 20:02:30 +0800 Subject: [PATCH 0393/2314] Add the missing count argument to GEORADIUS and GEORADIUSBYMEMBER. 
--- commands.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/commands.json b/commands.json index a48361db85..60267dbf71 100644 --- a/commands.json +++ b/commands.json @@ -827,6 +827,11 @@ "type": "enum", "enum": ["WITHHASH"], "optional": true + }, + { + "name": "count", + "type": "integer", + "optional": true } ], "group": "geo" @@ -869,6 +874,11 @@ "type": "enum", "enum": ["WITHHASH"], "optional": true + }, + { + "name": "count", + "type": "integer", + "optional": true } ], "group": "geo" From 827f77cd1b28164b1a33c3fb3e4cdf1b175f9384 Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Sun, 16 Aug 2015 12:19:42 -0400 Subject: [PATCH 0394/2314] Copy edit for typo --- commands/georadius.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/georadius.md b/commands/georadius.md index a7808dd5c2..d26172d1aa 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -13,7 +13,7 @@ The command optionally returns additional information using the following option * `WITHDIST`: Also return the distance of the returned items from the specified center. The distance is returned in the same unit as the unit specified as the radius argument of the command. * `WITHCOORD`: Also return the longitude,latitude coordinates of the matching items. -* `WITHASH`: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. +* `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. The command default is to return unsorted items. 
Two different sorting methods can be invoked using the following two options: From 440dd955406be204e2e700cde4e9c05d53f516a0 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 17 Aug 2015 18:44:16 +0300 Subject: [PATCH 0395/2314] Unblocked client flagged changed to uppercase --- commands/client-list.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/client-list.md b/commands/client-list.md index 4279cf3c9a..900b0afc2c 100644 --- a/commands/client-list.md +++ b/commands/client-list.md @@ -40,7 +40,7 @@ b: the client is waiting in a blocking operation i: the client is waiting for a VM I/O (deprecated) d: a watched keys has been modified - EXEC will fail c: connection to be closed after writing entire reply -u: the client is unblocked +U: the client is unblocked A: connection to be closed ASAP N: no specific flag set ``` From b66d03e28facd415fa593abcea25bdc2123b8212 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 17 Aug 2015 18:45:28 +0300 Subject: [PATCH 0396/2314] Added client readonly flag --- commands/client-list.md | 1 + 1 file changed, 1 insertion(+) diff --git a/commands/client-list.md b/commands/client-list.md index 900b0afc2c..a7b085600d 100644 --- a/commands/client-list.md +++ b/commands/client-list.md @@ -41,6 +41,7 @@ i: the client is waiting for a VM I/O (deprecated) d: a watched keys has been modified - EXEC will fail c: connection to be closed after writing entire reply U: the client is unblocked +r: the client is in readonly mode against a cluster node A: connection to be closed ASAP N: no specific flag set ``` From c6a6152d9049fa05bdc978822c49e4f5230e863b Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 17 Aug 2015 18:48:44 +0300 Subject: [PATCH 0397/2314] Adds documentation for READONLY and READWRITE --- commands.json | 12 ++++++++++++ commands/readonly.md | 19 +++++++++++++++++++ commands/readwrite.md | 10 ++++++++++ 3 files changed, 41 insertions(+) create mode 100644 commands/readonly.md create mode 
100644 commands/readwrite.md diff --git a/commands.json b/commands.json index 60267dbf71..6597c3dc86 100644 --- a/commands.json +++ b/commands.json @@ -1748,6 +1748,18 @@ "since": "1.0.0", "group": "generic" }, + "READONLY": { + "summary:" "Enables read queries for a connection to a cluster slave node", + "complexity": "O(1)", + "since": "3.0.0", + "group": "cluster" + }, + "READWRITE": { + "summary:" "Disables read queries for a connection to a cluster slave node", + "complexity": "O(1)", + "since": "3.0.0", + "group": "cluster" + }, "RENAME": { "summary": "Rename a key", "complexity": "O(1)", diff --git a/commands/readonly.md b/commands/readonly.md new file mode 100644 index 0000000000..511f294e9e --- /dev/null +++ b/commands/readonly.md @@ -0,0 +1,19 @@ +Enables read queries for a connection to a Redis Cluster slave node. + +Normally slave nodes will redirect clients to the authoritative master for +the hash slot involved in a given command, however clients can use slaves +in order to scale reads using the `READONLY` command. + +`READONLY` tells a Redis Cluster slave node that the client is ok reading +possibly stale data and is not interested in running write queries. + +When the connection is in readonly mode, the cluster will send a redirection +to the client only if the operation involves keys not served by the slave's +master node. This may happen because: + +1. The client sent a command about hash slots never served by the master of this slave. +2. The cluster was reconfigured (for example resharded) and the slave is no longer able to serve commands for a given hash slot. + +@return + +@simple-string-reply diff --git a/commands/readwrite.md b/commands/readwrite.md new file mode 100644 index 0000000000..847ca9f301 --- /dev/null +++ b/commands/readwrite.md @@ -0,0 +1,10 @@ +Disables read queries for a connection to a Redis Cluster slave node. 
+ +Read queries against a Redis Cluster slave node are disabled by default, +but you can use the `READONLY` command to change this behavior on a per- +connection basis. The `READWRITE` command resets the readonly mode flag +of a connection back to readwrite. + +@return + +@simple-string-reply From 2095cc1f22512a6906836d0b143bb45c630e2bc0 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 17 Aug 2015 20:30:33 +0300 Subject: [PATCH 0398/2314] fixin my newb mistake in commands.json --- commands.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index 6597c3dc86..e71cfa6904 100644 --- a/commands.json +++ b/commands.json @@ -1749,13 +1749,13 @@ "group": "generic" }, "READONLY": { - "summary:" "Enables read queries for a connection to a cluster slave node", + "summary": "Enables read queries for a connection to a cluster slave node", "complexity": "O(1)", "since": "3.0.0", "group": "cluster" }, "READWRITE": { - "summary:" "Disables read queries for a connection to a cluster slave node", + "summary": "Disables read queries for a connection to a cluster slave node", "complexity": "O(1)", "since": "3.0.0", "group": "cluster" From a702f6f4431c2142a3b42edb3d62537d41d9fcf5 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 17 Aug 2015 20:42:12 +0300 Subject: [PATCH 0399/2314] fix spelling errors --- commands/readonly.md | 4 ++-- wordlist | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/commands/readonly.md b/commands/readonly.md index 511f294e9e..697010af76 100644 --- a/commands/readonly.md +++ b/commands/readonly.md @@ -4,8 +4,8 @@ Normally slave nodes will redirect clients to the authoritative master for the hash slot involved in a given command, however clients can use slaves in order to scale reads using the `READONLY` command. -`READONLY` tells a Redis Cluster slave node that the client is ok reading -possibly stale data and is not interested in running write queries. 
+`READONLY` tells a Redis Cluster slave node that the client is willing to +read possibly stale data and is not interested in running write queries. When the connection is in readonly mode, the cluster will send a redirection to the client only if the operation involves keys not served by the slave's diff --git a/wordlist b/wordlist index d9f683af00..1001731b83 100644 --- a/wordlist +++ b/wordlist @@ -299,6 +299,7 @@ qsort queueing rdb readonly +readwrite realtime rebalance rebalancing @@ -314,6 +315,7 @@ reimplement representable reprovisioned reshard +resharded resharding reshardings resync From 76a2f47a8e4528a45492bdcfd55bb36ebcb7ccba Mon Sep 17 00:00:00 2001 From: Josh Kupershmidt Date: Tue, 18 Aug 2015 08:38:23 -0400 Subject: [PATCH 0400/2314] Cleanup of the doc section: Bit and byte level operations. --- topics/memory-optimization.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/memory-optimization.md b/topics/memory-optimization.md index ffe5b619c2..2267439efc 100644 --- a/topics/memory-optimization.md +++ b/topics/memory-optimization.md @@ -26,7 +26,7 @@ Redis compiled with 32 bit target uses a lot less memory per key, since pointers Bit and byte level operations ----------------------------- -Redis 2.2 introduced new bit and byte level operations: `GETRANGE`, `SETRANGE`, `GETBIT` and `SETBIT`. Using this commands you can treat the Redis string type as a random access array. For instance if you have an application where users are identified by an unique progressive integer number, you can use a bitmap in order to save information about sex of users, setting the bit for females and clearing it for males, or the other way around. With 100 millions of users this data will take just 12 megabyte of RAM in a Redis instance. You can do the same using `GETRANGE` and `SETRANGE` in order to store one byte of information for user. 
This is just an example but it is actually possible to model a number of problems in very little space with this new primitives. +Redis 2.2 introduced new bit and byte level operations: `GETRANGE`, `SETRANGE`, `GETBIT` and `SETBIT`. Using these commands you can treat the Redis string type as a random access array. For instance if you have an application where users are identified by a unique progressive integer number, you can use a bitmap in order to save information about the sex of users, setting the bit for females and clearing it for males, or the other way around. With 100 million users this data will take just 12 megabytes of RAM in a Redis instance. You can do the same using `GETRANGE` and `SETRANGE` in order to store one byte of information for each user. This is just an example but it is actually possible to model a number of problems in very little space with these new primitives. Use hashes when possible ------------------------ From 56d8e988618a1aa5067d04bb6d7c5be9539b6b81 Mon Sep 17 00:00:00 2001 From: randvis Date: Thu, 27 Aug 2015 22:13:29 +0800 Subject: [PATCH 0401/2314] Make the number consistent with command output --- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 08e79c7bfc..78c36d7721 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -641,8 +641,8 @@ OK 107046 R (0 err) | 107046 W (0 err) | 114 lost | ``` -When I set the counter to 0 the real value was 144, so the program reports -144 lost writes (`INCR` commands that are not remembered by the cluster). +When I set the counter to 0 the real value was 114, so the program reports +114 lost writes (`INCR` commands that are not remembered by the cluster). This program is much more interesting as a test case, so we'll use it to test the Redis Cluster failover. 
From e552b63e1c0ac0126dc8513c3a68d6935087aba2 Mon Sep 17 00:00:00 2001 From: randvis Date: Thu, 27 Aug 2015 22:54:26 +0800 Subject: [PATCH 0402/2314] Fix wrong word usage in context, s/slave/master/ --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 08e79c7bfc..0a7aa31dd2 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -908,7 +908,7 @@ general idea and what you should do in order to benefit from it. The reason why you may want to let your cluster replicas to move from one master to another under certain condition, is that usually the Redis Cluster is as -resistant to failures as the number of replicas attached to a given slave. +resistant to failures as the number of replicas attached to a given master. For example a cluster where every master has a single replica can't continue operations if the master and its replica fail at the same time, simply because From f2b4ac852236b7d7c45525c1f3a88827406ace12 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Thu, 27 Aug 2015 20:28:57 +0200 Subject: [PATCH 0403/2314] Add Hibernate OGM to the tools listing --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index 28f653110a..e2ca7bac29 100644 --- a/tools.json +++ b/tools.json @@ -352,5 +352,13 @@ "repository": "https://github.com/eugef/phpRedExpert", "description": "phpRedExpert ia simple and powerful web UI for Redis databases and servers management, written in PHP and JavaScript.", "authors": ["EugeneFidelin"] + }, + { + "name": "Hibernate OGM", + "language": "Java", + "url": "http://hibernate.org/ogm", + "repository": "https://github.com/hibernate/hibernate-ogm", + "description": "Hibernate OGM is the JPA integration for Redis", + "authors": ["mp911de", "gunnarmorling"] } ] From e5c9a2fbd5bcaa369950590981fb14b591f0f9da Mon Sep 17 00:00:00 2001 From: Andrea Leopardi 
Date: Fri, 28 Aug 2015 09:06:24 +0200 Subject: [PATCH 0404/2314] Add the Redix client --- clients.json | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index e2792223cb..b17476f2a7 100644 --- a/clients.json +++ b/clients.json @@ -1042,6 +1042,14 @@ "repository": "https://github.com/mikeheier/Redis-AS3", "description": "An as3 client library for redis.", "authors": [] - } + }, + { + "name": "redix", + "language": "Elixir", + "repository": "https://github.com/whatyouhide/redix", + "description": "Superfast, pipelined, resilient Redis client written in pure Elixir.", + "authors": ["whatyouhide"], + "active": true + } ] From b509c479e5818232b6b4940402f403c4a10c8b59 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 28 Aug 2015 14:46:00 +0200 Subject: [PATCH 0405/2314] fix: Parse JSON files for errors --- Rakefile | 2 +- makefile | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Rakefile b/Rakefile index 44e2faaa8b..08878f1c9c 100644 --- a/Rakefile +++ b/Rakefile @@ -4,7 +4,7 @@ task :parse do require "json" require "batch" - Batch.each(Dir["**/*.json"]) do |file| + Dir["**/*.json"].each do |file| JSON.parse(File.read(file)) end end diff --git a/makefile b/makefile index 9c59c473fd..09b8a12826 100644 --- a/makefile +++ b/makefile @@ -1,7 +1,13 @@ MD_FILES:=$(shell find commands -name '*.md') +JSON_FILES:=$(shell find commands -name '*.json') TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) +default: parse spell + +parse: $(JSON_FILES) + rake parse + spell: tmp/commands tmp/topics $(SPELL_FILES) find tmp -name '*.spell' | xargs cat > tmp/spelling-errors cat tmp/spelling-errors @@ -30,4 +36,4 @@ tmp/dict: wordlist tmp/commands.txt clean: rm -rf tmp/* -.PHONY: clean +.PHONY: parse spell clean From 3210597d2525401ad24cb630ee9cd4adc4cbd60e Mon Sep 17 00:00:00 2001 From: Stefan Wille Date: Fri, 15 May 2015 
16:36:35 +0200 Subject: [PATCH 0406/2314] Add the crystal-redis client for Crystal --- clients.json | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/clients.json b/clients.json index b17476f2a7..fba965b4c4 100644 --- a/clients.json +++ b/clients.json @@ -1051,5 +1051,16 @@ "description": "Superfast, pipelined, resilient Redis client written in pure Elixir.", "authors": ["whatyouhide"], "active": true + }, + + { + "name": "crystal-redis", + "language": "Crystal", + "url": "http://www.stefanwille.com/projects/crystal-redis", + "repository": "https://github.com/stefanwille/crystal-redis", + "description": "Full featured, high performance Redis client for Crystal", + "authors": ["stefanwille"], + "recommended": true, + "active": true } ] From 0074bb05ca19103b2e9da349d7e16486d43386d1 Mon Sep 17 00:00:00 2001 From: Anton Davydov Date: Sat, 29 Aug 2015 00:18:01 +0300 Subject: [PATCH 0407/2314] Fix typo in php redis client --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index b17476f2a7..7618f3aa6f 100644 --- a/clients.json +++ b/clients.json @@ -421,7 +421,7 @@ "name": "Kdyby/Redis", "language": "PHP", "repository": "https://github.com/kdyby/redis", - "description": "Powerfull Redis storage for Nette Framework", + "description": "Powerful Redis storage for Nette Framework", "active": true }, From 3fcce5b140b0fcc523a24c9d8f67918da57162cb Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 3 Sep 2015 16:40:10 +0200 Subject: [PATCH 0408/2314] Redis.io intro text updated. --- topics/introduction.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/introduction.md b/topics/introduction.md index c33aeb2b35..fdd45182bb 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -1,8 +1,8 @@ Introduction to Redis === -Redis is an open source, BSD licensed, advanced **key-value cache** and **store**. 
It is often referred to as a **data structure server** since -keys can contain [strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets), [bitmaps](/topics/data-types-intro#bitmaps) and [hyperloglogs](/topics/data-types-intro#hyperloglogs). +Redis is an open source (BSD licensed), in-memory **data structure store**, used as database, cache and message broker. It supports data strucures such as +[strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps) and [hyperloglogs](/topics/data-types-intro#hyperloglogs) and [geospatial indexes](/commands/geoadd) with radius queries. Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions) and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). You can run **atomic operations** on these types, like [appending to a string](/commands/append); From 2addf5f0193da30c6db97dccee2a43357174ed28 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 3 Sep 2015 16:42:08 +0200 Subject: [PATCH 0409/2314] Fixed typo in intro text. 
--- topics/introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/introduction.md b/topics/introduction.md index fdd45182bb..90a0095c3a 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -2,7 +2,7 @@ Introduction to Redis === Redis is an open source (BSD licensed), in-memory **data structure store**, used as database, cache and message broker. It supports data strucures such as -[strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps) and [hyperloglogs](/topics/data-types-intro#hyperloglogs) and [geospatial indexes](/commands/geoadd) with radius queries. Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions) and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). +[strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps), [hyperloglogs](/topics/data-types-intro#hyperloglogs) and [geospatial indexes](/commands/geoadd) with radius queries. Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions) and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). 
You can run **atomic operations** on these types, like [appending to a string](/commands/append); From 2b6c4155a875df27a7df8ed0d1008a271a64012b Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 4 Sep 2015 11:39:57 +0300 Subject: [PATCH 0410/2314] Fixed typo in intro text --- topics/introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/introduction.md b/topics/introduction.md index 90a0095c3a..975506cf1b 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -1,7 +1,7 @@ Introduction to Redis === -Redis is an open source (BSD licensed), in-memory **data structure store**, used as database, cache and message broker. It supports data strucures such as +Redis is an open source (BSD licensed), in-memory **data structure store**, used as database, cache and message broker. It supports data structures such as [strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps), [hyperloglogs](/topics/data-types-intro#hyperloglogs) and [geospatial indexes](/commands/geoadd) with radius queries. Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions) and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). 
You can run **atomic operations** From 36f156dfe94d9dd5ff2554cf15b223d43046524b Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 4 Sep 2015 14:14:57 +0300 Subject: [PATCH 0411/2314] `U` socket flag added per @badboy's observation --- commands/client-list.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commands/client-list.md b/commands/client-list.md index a7b085600d..d9a91f327b 100644 --- a/commands/client-list.md +++ b/commands/client-list.md @@ -40,7 +40,8 @@ b: the client is waiting in a blocking operation i: the client is waiting for a VM I/O (deprecated) d: a watched keys has been modified - EXEC will fail c: connection to be closed after writing entire reply -U: the client is unblocked +u: the client is unblocked +U: the client is connected via a Unix domain socket r: the client is in readonly mode against a cluster node A: connection to be closed ASAP N: no specific flag set From 3aa7d96ca2293efae20f4365086d2d390ac60086 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 18:49:51 -0300 Subject: [PATCH 0412/2314] fixes #269 - redis-racket --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 13445e1b0f..64aaf23cb4 100644 --- a/clients.json +++ b/clients.json @@ -1062,5 +1062,15 @@ "authors": ["stefanwille"], "recommended": true, "active": true + }, + + { + "name": "redis-racket", + "language": "Racket", + "url": "https://pkg.racket-lang.org/info/redis", + "repository": "https://github.com/stchang/redis", + "description": "A Redis client for Racket.", + "authors": ["s_chng"], + "active": true } ] From 16e4996fa65ba36c66217e6eda57c930f65d9b8c Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 18:59:45 -0300 Subject: [PATCH 0413/2314] fixes #271 - delphi-redis --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 64aaf23cb4..f792415bfb 100644 --- a/clients.json +++ 
b/clients.json @@ -1072,5 +1072,14 @@ "description": "A Redis client for Racket.", "authors": ["s_chng"], "active": true + }, + + { + "name": "delphi-redis", + "language": "Delphi", + "url": "https://bitbucket.org/Gloegg/delphi-redis", + "repository": "https://bitbucket.org/Gloegg/delphi-redis.git", + "description": "A lightweight Redis client written in Delphi", + "authors": ["Gloegg"] } ] From 44bd69dd83680b3e4df8ebb1da2eabb1713939af Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:07:05 -0300 Subject: [PATCH 0414/2314] fixes #296 - @ziogas's PHP implementation --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index f792415bfb..d036f43dc4 100644 --- a/clients.json +++ b/clients.json @@ -1081,5 +1081,14 @@ "repository": "https://bitbucket.org/Gloegg/delphi-redis.git", "description": "A lightweight Redis client written in Delphi", "authors": ["Gloegg"] + }, + + { + "name": "PHP Redis implementation / wrapper", + "language": "PHP", + "url": "https://github.com/ziogas/PHP-Redis-implementation", + "repository": "https://github.com/ziogas/PHP-Redis-implementation", + "description": "Simple and lightweight redis implementation. Basically wrapper for raw redis commands.", + "authors": ["arminas"] } ] From c797dabe63a9704101bd61bd22f1ca4835e4154d Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:09:17 -0300 Subject: [PATCH 0415/2314] fixes #298 - @jonathanslenders's python asyncio-redis --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index d036f43dc4..f589a31a2b 100644 --- a/clients.json +++ b/clients.json @@ -1090,5 +1090,15 @@ "repository": "https://github.com/ziogas/PHP-Redis-implementation", "description": "Simple and lightweight redis implementation. 
Basically wrapper for raw redis commands.", "authors": ["arminas"] + }, + + { + "name": "asyncio_redis", + "language": "Python", + "url": "http://asyncio-redis.readthedocs.org/", + "repository": "https://github.com/jonathanslenders/asyncio-redis", + "description": "Asynchronous Redis client that works with the asyncio event loop", + "authors": ["Jonathan Slenders"], + "active": true } ] From 778c489fd47a1777b15029ae2c10a2c790335c07 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:11:10 -0300 Subject: [PATCH 0416/2314] fixes #300 - @shogo82148's Redis::Fast (perl) --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index f589a31a2b..50ceee3e2b 100644 --- a/clients.json +++ b/clients.json @@ -1100,5 +1100,15 @@ "description": "Asynchronous Redis client that works with the asyncio event loop", "authors": ["Jonathan Slenders"], "active": true + }, + + { + "name": "Redis::Fast", + "language": "Perl", + "url": "https://metacpan.org/pod/Redis::Fast", + "repository": "https://github.com/shogo82148/Redis-Fast", + "description": "Perl binding for Redis database", + "authors": ["shogo82148"], + "active": true } ] From c6b31e7852850c68d7b15584cde902cbd2e82b4c Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:17:49 -0300 Subject: [PATCH 0417/2314] fixes #343 - @chiradip's scala RedisClient --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 50ceee3e2b..18f8936282 100644 --- a/clients.json +++ b/clients.json @@ -1110,5 +1110,14 @@ "description": "Perl binding for Redis database", "authors": ["shogo82148"], "active": true + }, + + { + "name": "RedisClient", + "language": "Scala", + "repository": "https://github.com/chiradip/RedisClient", + "description": "A no nonsense Redis Client using pure scala. 
Preserves elegant Redis style without any need to learn any special API", + "authors": ["chiradip"], + "active": true } ] From 055f290739cae31a37381653baa3d59027a7d0c3 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:34:28 -0300 Subject: [PATCH 0418/2314] fixes #353 - @pepijndevos's pypredis (python) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 18f8936282..35987e5d5e 100644 --- a/clients.json +++ b/clients.json @@ -1119,5 +1119,14 @@ "description": "A no nonsense Redis Client using pure scala. Preserves elegant Redis style without any need to learn any special API", "authors": ["chiradip"], "active": true + }, + + { + "name": "Pypredis", + "language": "Python", + "repository": "https://github.com/pepijndevos/pypredis", + "description": "A client focused on arbitrary sharding and parallel pipelining.", + "authors": ["pepijndevos"], + "active": true } ] From 35498b00f04d0c71e5786c65b6412bbed46e01a7 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:36:23 -0300 Subject: [PATCH 0419/2314] fixes #354 - agree with @pepijndevos that evilkost/brukva is not active (since 2011) --- clients.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 35987e5d5e..7fca559a79 100644 --- a/clients.json +++ b/clients.json @@ -500,8 +500,7 @@ "language": "Python", "repository": "https://github.com/evilkost/brukva", "description": "Asynchronous Redis client that works within Tornado IO loop", - "authors": ["evilkost"], - "active": true + "authors": ["evilkost"] }, { From 000e7808b8f8a911b410c372bed13c774d4a4bf4 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:38:52 -0300 Subject: [PATCH 0420/2314] fixes #366 - @himulawang's IRedis (Dart) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 7fca559a79..205158f193 100644 --- 
a/clients.json +++ b/clients.json @@ -1127,5 +1127,14 @@ "description": "A client focused on arbitrary sharding and parallel pipelining.", "authors": ["pepijndevos"], "active": true + }, + + { + "name": "IRedis", + "language": "Dart", + "repository": "https://github.com/himulawang/i_redis", + "description": "A redis client for Dart", + "authors": ["ila"], + "active": true } ] From c6ed136df8529329acbbcc29d6c202afb66df092 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:40:30 -0300 Subject: [PATCH 0421/2314] fixes #369 - new DartRedisClient link --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 205158f193..607c0d6eb5 100644 --- a/clients.json +++ b/clients.json @@ -636,7 +636,7 @@ { "name": "DartRedisClient", "language": "Dart", - "url": "https://github.com/mythz/DartRedisClient", + "url": "https://github.com/dartist/redis_client", "description": "A high-performance async/non-blocking Redis client for Dart", "authors": ["demisbellot"], "recommended": true, From 339100c5eeadebfb76e7195cdb3ff39cc13eacad Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:42:58 -0300 Subject: [PATCH 0422/2314] fixes #371 - latest link to tideland's redis client (go) --- clients.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 607c0d6eb5..c10b72e8f9 100644 --- a/clients.json +++ b/clients.json @@ -122,7 +122,8 @@ { "name": "Tideland Go Redis Client", "language": "Go", - "repository": "https://github.com/tideland/godm", + "url": "https://github.com/tideland/golib/tree/master/redis", + "repository": "https://github.com/tideland/golib", "description": "A flexible Go Redis client able to handle all commands", "authors": ["themue"], "active": true From 3fd7eb91ccabeb8545ce02a0c8098db177e28739 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: 
Mon, 7 Sep 2015 19:46:45 -0300 Subject: [PATCH 0423/2314] fixes #424 - gawk extension --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index c10b72e8f9..1daaa2f447 100644 --- a/clients.json +++ b/clients.json @@ -1137,5 +1137,14 @@ "description": "A redis client for Dart", "authors": ["ila"], "active": true + }, + + { + "name": "gawk-redis", + "language": "gawk", + "repository": "http://sourceforge.net/p/gawkextlib/code/ci/master/tree/", + "description": "Gawk extension, using the hiredis C library. Supports pipelining and pub/sub", + "authors": ["paulinohuerta"], + "active": true } ] From 7fc02261afa35f94ca165cd3b8aa3f08dd2093e6 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 20:57:34 -0300 Subject: [PATCH 0424/2314] fixes #527 - @hishamco's vRedis (VB) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 1daaa2f447..98fdaeedd8 100644 --- a/clients.json +++ b/clients.json @@ -1146,5 +1146,14 @@ "description": "Gawk extension, using the hiredis C library. 
Supports pipelining and pub/sub", "authors": ["paulinohuerta"], "active": true + }, + + { + "name": "vRedis", + "language": "VB", + "repository": "https://github.com/hishamco/vRedis", + "description": "Redis client using VB.NET.", + "authors": ["hishamco"], + "active": true } ] From d5ee8da576d3a9988e3f6ea577b7b2cdb9af65f4 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:00:28 -0300 Subject: [PATCH 0425/2314] fixes #532 - delphiredisclient and redis_client.fpc (pascal) --- clients.json | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 98fdaeedd8..b21340d115 100644 --- a/clients.json +++ b/clients.json @@ -1076,7 +1076,7 @@ { "name": "delphi-redis", - "language": "Delphi", + "language": "Pascal", "url": "https://bitbucket.org/Gloegg/delphi-redis", "repository": "https://bitbucket.org/Gloegg/delphi-redis.git", "description": "A lightweight Redis client written in Delphi", @@ -1155,5 +1155,22 @@ "description": "Redis client using VB.NET.", "authors": ["hishamco"], "active": true + }, + + { + "name": "delphiredisclient", + "language": "Pascal", + "repository": "https://github.com/danieleteti/delphiredisclient", + "description": "Redis client for Delphi", + "authors": ["danieleteti"], + "active": true + }, + + { + "name": "redis_client.fpc", + "language": "Pascal", + "repository": "https://github.com/ik5/redis_client.fpc", + "description": "Object Pascal client implementation for the redis protocol and commands", + "authors": ["ik5"] } ] From b4cec5d1ad247a9d4a7d89d9bdafbf9d3df4c279 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:02:15 -0300 Subject: [PATCH 0426/2314] fixes #533 - @crypt1d's Redi.sh (bash) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index b21340d115..c88b1e4674 100644 --- a/clients.json +++ b/clients.json @@ -1172,5 +1172,14 @@ 
"repository": "https://github.com/ik5/redis_client.fpc", "description": "Object Pascal client implementation for the redis protocol and commands", "authors": ["ik5"] + }, + + { + "name": "Redi.sh", + "language": "Bash", + "repository": "https://github.com/crypt1d/redi.sh", + "description": "Simple, Bash-based, Redis client to store your script's variables", + "authors": ["crypt1d"], + "active": true } ] From 8bd8af71d874444c5b0bae2b90c8c822fac081bf Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:03:09 -0300 Subject: [PATCH 0427/2314] fixes #543 - amphp/redis --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index c88b1e4674..8edc61a190 100644 --- a/clients.json +++ b/clients.json @@ -1181,5 +1181,14 @@ "description": "Simple, Bash-based, Redis client to store your script's variables", "authors": ["crypt1d"], "active": true + }, + + { + "name": "amphp/redis", + "language": "PHP", + "repository": "https://github.com/amphp/redis", + "description": "An async redis client built on the amp concurrency framework.", + "authors": ["kelunik"], + "active": true } ] From ceae8523a2097948aeaa0fdcd8090ed9a0943e4b Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:07:41 -0300 Subject: [PATCH 0428/2314] fixes #563 - @schlitzered's pyredis --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 8edc61a190..83d3edd446 100644 --- a/clients.json +++ b/clients.json @@ -1190,5 +1190,14 @@ "description": "An async redis client built on the amp concurrency framework.", "authors": ["kelunik"], "active": true + }, + + { + "name": "pyredis", + "language": "Python", + "repository": "https://github.com/schlitzered/pyredis", + "description": "Python Client with support for Redis Cluster. 
Currently only Python 3 is supported.", + "authors": ["schlitzered"], + "active": true } ] From a1066e55ed1d085c0d097f125df85ac046341a30 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:08:36 -0300 Subject: [PATCH 0429/2314] fixes #567 - @jkaye2012's Redis.jl (Julia) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 83d3edd446..ba1a677f23 100644 --- a/clients.json +++ b/clients.json @@ -1199,5 +1199,14 @@ "description": "Python Client with support for Redis Cluster. Currently only Python 3 is supported.", "authors": ["schlitzered"], "active": true + }, + + { + "name": "Redis.jl", + "language": "Julia", + "repository": "https://github.com/jkaye2012/redis.jl", + "description": "A fully-featured Redis client for the Julia programming language", + "authors": ["jkaye2012"], + "active": true } ] From b0c0ea268d529e4f3c3ca02cf964350efa41ea6c Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:10:16 -0300 Subject: [PATCH 0430/2314] fixes #574 - @h0x91b's redis-fast-driver and fast-redis-cluster (javascript) --- clients.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/clients.json b/clients.json index ba1a677f23..1a23d0ab75 100644 --- a/clients.json +++ b/clients.json @@ -1208,5 +1208,21 @@ "description": "A fully-featured Redis client for the Julia programming language", "authors": ["jkaye2012"], "active": true + }, + + { + "name": "redis-fast-driver", + "language": "Javascript", + "repository": "https://github.com/h0x91b/redis-fast-driver", + "description": "Driver based on hiredis async lib, can do PUBSUB and MONITOR, simple and really fast, written with NaN so works fine with node 0.8, 0.10 and 0.12", + "authors": ["h0x91B"] + }, + + { + "name": "fast-redis-cluster", + "language": "Javascript", + "repository": "https://github.com/h0x91b/fast-redis-cluster", + "description": "Simple and fast cluster driver 
with error handling, uses redis-fast-driver as main adapter and node_redis as backup for windows", + "authors": ["h0x91B"] } ] From 2ada357ce6862d7ee971cf9fd85447bb1390f9b5 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:12:57 -0300 Subject: [PATCH 0431/2314] fixes #589 - @smsonline's Redis::Cluster (perl) --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 1a23d0ab75..226772a7f2 100644 --- a/clients.json +++ b/clients.json @@ -1224,5 +1224,15 @@ "repository": "https://github.com/h0x91b/fast-redis-cluster", "description": "Simple and fast cluster driver with error handling, uses redis-fast-driver as main adapter and node_redis as backup for windows", "authors": ["h0x91B"] + }, + + { + "name": "Redis::Cluster", + "language": "Perl", + "url": "http://search.cpan.org/dist/Redis-Cluster/", + "repository": "https://github.com/smsonline/redis-cluster-perl", + "description": "Redis Cluster client for Perl", + "authors": ["smsonline"], + "active": true } ] From 35de2ab3903b312006e5f113ccdf767b6889d789 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:14:17 -0300 Subject: [PATCH 0432/2314] fixes #590 - @Farhaddc's Swidis (Swift) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 226772a7f2..ad4280ca32 100644 --- a/clients.json +++ b/clients.json @@ -1234,5 +1234,14 @@ "description": "Redis Cluster client for Perl", "authors": ["smsonline"], "active": true + }, + + { + "name": "Swidis", + "language": "Swift", + "repository": "https://github.com/Farhaddc/Swidis", + "description": "iOS Framework Allowing you to connect to Redis server with Swift programming language.", + "authors": ["Farhaddc"], + "active": true } ] From 9df4fd1c597c93a112fb487a6a7513e636567fce Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:58:19 -0300 Subject: [PATCH 0433/2314] fixes 
#594 - @eu90h's Rackdis (Racket) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index ad4280ca32..f72284809f 100644 --- a/clients.json +++ b/clients.json @@ -1243,5 +1243,14 @@ "description": "iOS Framework Allowing you to connect to Redis server with Swift programming language.", "authors": ["Farhaddc"], "active": true + }, + + { + "name": "Rackdis", + "language": "Racket", + "repository": "https://github.com/eu90h/rackdis", + "description": "A Redis client for Racket", + "authors": ["eu90h"], + "active": true } ] From 292f47f3faa3c34973308e7750a160940c3c6a2b Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:59:36 -0300 Subject: [PATCH 0434/2314] fixes #595 - @dizzus's RedisKit (Objective-C) --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index f72284809f..f55f831388 100644 --- a/clients.json +++ b/clients.json @@ -1252,5 +1252,13 @@ "description": "A Redis client for Racket", "authors": ["eu90h"], "active": true + }, + + { + "name": "RedisKit", + "language": "Objective-C", + "repository": "https://github.com/dizzus/RedisKit", + "description": "RedisKit is a asynchronious client framework for Redis server, written in Objective-C", + "authors": ["dizzus"] } ] From 9ca807fdbafeabcd9ff90bd3fc968bb9ab1859cb Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 8 Sep 2015 14:47:20 +0200 Subject: [PATCH 0435/2314] Check clients and tools JSON --- makefile | 11 ++++++++++- utils/clients.rb | 21 +++++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/makefile b/makefile index 09b8a12826..44ff6e5745 100644 --- a/makefile +++ b/makefile @@ -8,6 +8,15 @@ default: parse spell parse: $(JSON_FILES) rake parse +check: clients tools + +clients: + ruby utils/clients.rb clients.json + +tools: + ruby utils/clients.rb tools.json + + spell: tmp/commands tmp/topics $(SPELL_FILES) find tmp -name 
'*.spell' | xargs cat > tmp/spelling-errors cat tmp/spelling-errors @@ -36,4 +45,4 @@ tmp/dict: wordlist tmp/commands.txt clean: rm -rf tmp/* -.PHONY: parse spell clean +.PHONY: parse spell clean check clients tools diff --git a/utils/clients.rb b/utils/clients.rb index 2a352615f9..858e20a072 100644 --- a/utils/clients.rb +++ b/utils/clients.rb @@ -54,9 +54,13 @@ def check_url(url) uri = URI(url) if uri.scheme == "http" || uri.scheme == "https" - res = Net::HTTP.get_response(uri) + begin + res = Net::HTTP.get_response(uri) - assert(res.code == "200" || res.code == "302", sprintf("URL broken: %s (%s)", url, res.code)) + assert(res.code == "200" || res.code == "302", sprintf("URL broken: %s (%s)", url, res.code)) + rescue OpenSSL::SSL::SSLError + assert(false, sprintf("SSL Error for URL: %s", url)) + end end end @@ -77,3 +81,16 @@ def assert(assertion, message) end end end + +if $0 == __FILE__ + require 'json' + file = ARGV.shift + if file.nil? + puts "Usage: #{$0} [file]" + exit(1) + end + + puts "Checking #{file}..." 
+ clients = JSON.parse(File.read(file), :symbolize_names => true) + Clients.check(clients) +end From 5b12fa3e86399de09a4742bca0ba5bc33b5df4e0 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 8 Sep 2015 14:47:46 +0200 Subject: [PATCH 0436/2314] fix: Change authors and URLs where necessary --- clients.json | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/clients.json b/clients.json index f55f831388..6944e2ce2c 100644 --- a/clients.json +++ b/clients.json @@ -1030,7 +1030,7 @@ "name": "rredis", "language": "R", "repository": "https://github.com/bwlewis/rredis", - "url": "http://cran.r-project.org/package=rredis", + "url": "https://cran.r-project.org/web/packages/rredis/index.html", "description": "Redis client for R", "authors": ["bwlewis"], "active": true @@ -1056,7 +1056,7 @@ { "name": "crystal-redis", "language": "Crystal", - "url": "http://www.stefanwille.com/projects/crystal-redis", + "url": "http://www.stefanwille.com/projects/crystal-redis/", "repository": "https://github.com/stefanwille/crystal-redis", "description": "Full featured, high performance Redis client for Crystal", "authors": ["stefanwille"], @@ -1067,7 +1067,6 @@ { "name": "redis-racket", "language": "Racket", - "url": "https://pkg.racket-lang.org/info/redis", "repository": "https://github.com/stchang/redis", "description": "A Redis client for Racket.", "authors": ["s_chng"], @@ -1098,7 +1097,7 @@ "url": "http://asyncio-redis.readthedocs.org/", "repository": "https://github.com/jonathanslenders/asyncio-redis", "description": "Asynchronous Redis client that works with the asyncio event loop", - "authors": ["Jonathan Slenders"], + "authors": ["jonathan_s"], "active": true }, @@ -1179,7 +1178,7 @@ "language": "Bash", "repository": "https://github.com/crypt1d/redi.sh", "description": "Simple, Bash-based, Redis client to store your script's variables", - "authors": ["crypt1d"], + "authors": 
["nkrzalic"], "active": true }, @@ -1197,7 +1196,7 @@ "language": "Python", "repository": "https://github.com/schlitzered/pyredis", "description": "Python Client with support for Redis Cluster. Currently only Python 3 is supported.", - "authors": ["schlitzered"], + "authors": [], "active": true }, @@ -1215,7 +1214,7 @@ "language": "Javascript", "repository": "https://github.com/h0x91b/redis-fast-driver", "description": "Driver based on hiredis async lib, can do PUBSUB and MONITOR, simple and really fast, written with NaN so works fine with node 0.8, 0.10 and 0.12", - "authors": ["h0x91B"] + "authors": [] }, { @@ -1223,7 +1222,7 @@ "language": "Javascript", "repository": "https://github.com/h0x91b/fast-redis-cluster", "description": "Simple and fast cluster driver with error handling, uses redis-fast-driver as main adapter and node_redis as backup for windows", - "authors": ["h0x91B"] + "authors": [] }, { From 35bc303058e9e90e10bb366f24898104c36f65e1 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 18:44:48 -0300 Subject: [PATCH 0437/2314] fixes #255 - pyres --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index e2ca7bac29..8965f8d554 100644 --- a/tools.json +++ b/tools.json @@ -360,5 +360,12 @@ "repository": "https://github.com/hibernate/hibernate-ogm", "description": "Hibernate OGM is the JPA integration for Redis", "authors": ["mp911de", "gunnarmorling"] + }, + { + "name": "pyres", + "language": "Python", + "repository" : "http://github.com/binarydud/pyres", + "description" : "Python library inspired by Resque for creating background jobs and workers", + "authors" : ["binarydud"] } ] From 56997b9b16f4d74a7a67cce46986879fb1b200ce Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 18:47:10 -0300 Subject: [PATCH 0438/2314] fixes #237 - Redis-RdbParser (perl) --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) 
diff --git a/tools.json b/tools.json index 8965f8d554..0d90196994 100644 --- a/tools.json +++ b/tools.json @@ -367,5 +367,12 @@ "repository" : "http://github.com/binarydud/pyres", "description" : "Python library inspired by Resque for creating background jobs and workers", "authors" : ["binarydud"] + }, + { + "name": "Redis-RdbParser", + "language": "Perl", + "repository": "https://github.com/flygoast/Redis-RdbParser", + "description": "Redis-RdbParser is a streaming parser for redis RDB database dumps.", + "authors": ["flygoast"] } ] From 202712387eb3aa1a67dba0f3869b39d7c590aa4a Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:05:06 -0300 Subject: [PATCH 0439/2314] fixes #279 - collected tools by @FGRibreau --- tools.json | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/tools.json b/tools.json index 0d90196994..240ec7eeab 100644 --- a/tools.json +++ b/tools.json @@ -374,5 +374,83 @@ "repository": "https://github.com/flygoast/Redis-RdbParser", "description": "Redis-RdbParser is a streaming parser for redis RDB database dumps.", "authors": ["flygoast"] + }, + { + "name": "Redset", + "language": "Python", + "repository": "https://github.com/percolate/redset", + "description": "Simple, generic sorted sets backed by Redis that can be used to coordinate distributed systems.", + "authors": ["percolate"] + }, + { + "name": "Redsmin", + "language": "Web", + "repository": "https://github.com/Redsmin/redsmin", + "url":"https://redsmin.com/", + "description": "A fully featured Redis GUI for managing and monitoring redis.", + "authors": ["fgribreau"] + }, + { + "name": "Hot-redis", + "language": "Python", + "repository": "https://github.com/stephenmcd/hot-redis", + "description": "Higher Order Types for Redis in Python", + "authors": ["stephenmcd"] + }, + { + "name": "Redis Tree", + "language": "Javascript", + 
"repository": "https://github.com/FGRibreau/redistree", + "description": "Load and save Trees to Redis using sets.", + "authors": ["fgribreau"] + }, + { + "name": "Redis-NaiveBayes", + "language": "Perl", + "repository": "https://github.com/caio/Redis-NaiveBayes", + "description": "A generic Redis-backed NaiveBayes implementation", + "authors": ["caio"] + }, + { + "name": "Agentredrabbit", + "language": "Python", + "repository": "https://github.com/wingify/agentredrabbit", + "description": "Transport agent that moves data from Redis to RabbitMQ", + "authors": ["wingify"] + }, + { + "name": "Redtrine", + "language": "PHP", + "repository": "https://github.com/redtrine/redtrine", + "description": "Redis-based advanced PHP data structures", + "authors": ["redtrine"] + }, + { + "name": "Redis LUA Unit", + "language": "Lua", + "repository": "https://github.com/Redsmin/redis-lua-unit", + "description": "Framework agnostic unit-testing for Redis Lua scripts", + "authors": ["redsmin"] + }, + { + "name": "Redis Info", + "language": "Javascript", + "repository": "https://github.com/FGRibreau/node-redis-info", + "description": "Redis info string parser for NodeJS", + "authors": ["fgribreau"] + }, + { + "name": "Redis tool", + "language": "Javascript", + "repository": "https://github.com/FGRibreau/redis-tool", + "description": "Redis-tool - Little helpers for Redis (ztrim, del-all, rename)", + "authors": ["fgribreau"] + }, + { + "name": "Redis_failover", + "language": "Ruby", + "repository": "https://github.com/ryanlecompte/redis_failover", + "description": "Redis Failover is a ZooKeeper-based automatic master/slave failover solution for Ruby.", + "authors": ["ryanlecompte"] } ] From 6b762de6aa5ce31cae7cc62ea33f6f45d4326bd2 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:19:30 -0300 Subject: [PATCH 0440/2314] fixes #345 - @iwac's LabVIEW toolkit 
for Redis --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 240ec7eeab..624fc09f7c 100644 --- a/tools.json +++ b/tools.json @@ -452,5 +452,12 @@ "repository": "https://github.com/ryanlecompte/redis_failover", "description": "Redis Failover is a ZooKeeper-based automatic master/slave failover solution for Ruby.", "authors": ["ryanlecompte"] + }, + { + "name": "redis-in-labview", + "language": "LabVIEW", + "repository": "https://decibel.ni.com/content/docs/DOC-36322", + "description": "LabVIEW toolkit for Redis", + "authors": ["iwac"] } ] From 0f1b1cb1b05c89d95587b74d0986c4efb78dc16b Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:50:28 -0300 Subject: [PATCH 0441/2314] fixes #438 - @cinience's RedisStudio tool --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 624fc09f7c..bc9382c9ff 100644 --- a/tools.json +++ b/tools.json @@ -459,5 +459,12 @@ "repository": "https://decibel.ni.com/content/docs/DOC-36322", "description": "LabVIEW toolkit for Redis", "authors": ["iwac"] + }, + { + "name": "RedisStudio", + "language": "C++", + "repository": "https://github.com/cinience/RedisStudio", + "description": "Redis GUI tool for windows platform.", + "authors": ["cinience"] } ] From 48df7aabeac4052a851d5aae4123b5b0a9520f90 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:51:35 -0300 Subject: [PATCH 0442/2314] fixes #476 - keylord --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index bc9382c9ff..2f3dfa053e 100644 --- a/tools.json +++ b/tools.json @@ -466,5 +466,12 @@ "repository": "https://github.com/cinience/RedisStudio", "description": "Redis GUI tool for windows platform.", "authors": ["cinience"] + }, + { + "name": "Keylord", + "language": "Java", + "url": "http://protonail.com/products/keylord", + "description": "Cross-platform administration 
and development GUI application for key-value databases like Redis, LevelDB, etc.", + "authors": ["protonail"] } ] From d034463b264a64650815618cb2c949d78ecee23b Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:56:04 -0300 Subject: [PATCH 0443/2314] fixes #506 - should be called HOT Redis --- tools.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools.json b/tools.json index 2f3dfa053e..a40dd0b14a 100644 --- a/tools.json +++ b/tools.json @@ -391,7 +391,7 @@ "authors": ["fgribreau"] }, { - "name": "Hot-redis", + "name": "HOT Redis", "language": "Python", "repository": "https://github.com/stephenmcd/hot-redis", "description": "Higher Order Types for Redis in Python", From 5f23d0d8e721e531f5ac169a30b24bd3bffe9f8f Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:58:35 -0300 Subject: [PATCH 0444/2314] fixes #517 - redispapa redis monitor --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index a40dd0b14a..bdf28d406a 100644 --- a/tools.json +++ b/tools.json @@ -473,5 +473,13 @@ "url": "http://protonail.com/products/keylord", "description": "Cross-platform administration and development GUI application for key-value databases like Redis, LevelDB, etc.", "authors": ["protonail"] + }, + { + "name": "redispapa", + "language": "Python", + "url": "https://github.com/no13bus/redispapa", + "repository": "https://github.com/no13bus/redispapa", + "description": "RedisPAPA is a redis monitor which watch the redis-info by using flask, angular, socket.io", + "authors": ["no13bus"] } ] From d295b00b1b49439765c5cd557e91c0daf9164ce1 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 19:59:35 -0300 Subject: [PATCH 0445/2314] fixes #535 - Hangfire --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index bdf28d406a..db91821400 100644 --- a/tools.json +++ b/tools.json @@ -481,5 
+481,13 @@ "repository": "https://github.com/no13bus/redispapa", "description": "RedisPAPA is a redis monitor which watch the redis-info by using flask, angular, socket.io", "authors": ["no13bus"] + }, + { + "name": "Hangfire", + "language": "C#", + "url": "http://hangfire.io", + "repository": "https://github.com/HangfireIO/Hangfire", + "description": "An easy way to perform fire-and-forget, delayed and recurring tasks inside ASP.NET apps", + "authors": ["odinserj"] } ] From 74f14cfbfa9fdbc328d8682fe1dd623c2472c98f Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 20:49:14 -0300 Subject: [PATCH 0446/2314] fixes #536 - @zhengshuxin's redis_builder --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index db91821400..6998727a79 100644 --- a/tools.json +++ b/tools.json @@ -489,5 +489,13 @@ "repository": "https://github.com/HangfireIO/Hangfire", "description": "An easy way to perform fire-and-forget, delayed and recurring tasks inside ASP.NET apps", "authors": ["odinserj"] + }, + { + "name": "redis_builder", + "language": "C++", + "url": "https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", + "repository": "https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", + "description": "A C++ redis tool to create and manage a redis cluster, basing on acl redis lib in https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", + "authors": ["zhengshuxin"] } ] From 25380da3bcb072ea254b4081c271d47d5c29c7c2 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 20:50:16 -0300 Subject: [PATCH 0447/2314] fixes #542 - add @coleifer's huey and walrus --- tools.json | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools.json b/tools.json index 6998727a79..8a854e78c1 100644 --- a/tools.json +++ b/tools.json @@ -497,5 +497,19 @@ "repository": 
"https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", "description": "A C++ redis tool to create and manage a redis cluster, basing on acl redis lib in https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "authors": ["zhengshuxin"] + }, + { + "name": "huey", + "language": "Python", + "repository": "https://github.com/coleifer/huey", + "description": "Simple multi-threaded Python task queue. Supports Redis.", + "authors": ["coleifer"] + }, + { + "name": "walrus", + "language": "Python", + "repository": "https://github.com/coleifer/walrus", + "description": "A collection of lightweight utilities for working with Redis in Python. Includes ORM, autocompletion, full-text search, cache, locks, and more.", + "authors": ["coleifer"] } ] From b750308f3018f61ee636725385f2442f56ef82c0 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 20:53:32 -0300 Subject: [PATCH 0448/2314] fixes #571 - @jacket-code's redisPlatform --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index 8a854e78c1..fa6d7e1d79 100644 --- a/tools.json +++ b/tools.json @@ -511,5 +511,13 @@ "repository": "https://github.com/coleifer/walrus", "description": "A collection of lightweight utilities for working with Redis in Python. 
Includes ORM, autocompletion, full-text search, cache, locks, and more.", "authors": ["coleifer"] + }, + { + "name": "RedisPlatform", + "language": "C", + "url": "http://www.jacketzhong.com/?p=220", + "repository": "https://github.com/jacket-code/redisPlatform", + "description": "A rpc platform that base on redis, You can use it to do a lot of things, it can be a game server", + "authors": ["jacketzhong"] } ] From 2388bf268e7db85fcbdf8d820d31e0ab7dd0e9df Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:04:33 -0300 Subject: [PATCH 0449/2314] fixes #551 - @ienaga's RedisPlugin for Phalcon (MySQL sharding) --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index fa6d7e1d79..bf749759a2 100644 --- a/tools.json +++ b/tools.json @@ -519,5 +519,12 @@ "repository": "https://github.com/jacket-code/redisPlatform", "description": "A rpc platform that base on redis, You can use it to do a lot of things, it can be a game server", "authors": ["jacketzhong"] + }, + { + "name": "RedisPlugin for Phalcon", + "language": "PHP", + "repository": "https://github.com/ienaga/RedisPlugin", + "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", + "authors": ["ienaga"] } ] From 070890f10d6bc13ac53bbe6dac54ca92bf3610e7 Mon Sep 17 00:00:00 2001 From: Seamus Abshere Date: Mon, 7 Sep 2015 21:06:19 -0300 Subject: [PATCH 0450/2314] fixes #560 - @maxbrieiev's promise-redis that makes node_redis work with promises --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index bf749759a2..29109ef1eb 100644 --- a/tools.json +++ b/tools.json @@ -526,5 +526,12 @@ "repository": "https://github.com/ienaga/RedisPlugin", "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", "authors": ["ienaga"] + }, + { + "name": "promise-redis", + "language": "Javascript", + "repository": 
"https://github.com/maxbrieiev/promise-redis", + "description": "Use any promise library with node_redis.", + "authors": ["maxbrieiev"] } ] From 256eaa7a281ca8fa810003a4c9f2a561a4031cf0 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 8 Sep 2015 14:52:37 +0200 Subject: [PATCH 0451/2314] fix: Change authors and repos for tools where necessary --- tools.json | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/tools.json b/tools.json index 29109ef1eb..d09fd0c834 100644 --- a/tools.json +++ b/tools.json @@ -18,7 +18,7 @@ "language": "Python", "repository": "https://github.com/ask/celery", "description": "Python task queue. Supports multiple backends.", - "authors": ["asksolem"] + "authors": ["asksol"] }, { "name": "Fnordmetric", @@ -93,7 +93,7 @@ { "name": "Meerkat", "language": "Ruby", - "repository": "http://carlhoerberg.github.com/meerkat/", + "repository": "http://carlhoerberg.github.io/meerkat/", "description": "Rack middleware for Server Sent Events with multiple backends.", "authors": ["carlhoerberg"] }, @@ -114,7 +114,7 @@ { "name": "Redis-store", "language": "Ruby", - "repository": "https://github.com/jodosha/redis-store", + "repository": "https://github.com/redis-store/redis-store", "description": "Namespaced Rack::Session, Rack::Cache, I18n and cache Redis stores for Ruby web frameworks.", "authors": ["jodosha"] }, @@ -128,7 +128,7 @@ { "name": "Rollout", "language": "Ruby", - "repository": "https://github.com/jamesgolick/rollout", + "repository": "https://github.com/FetLife/rollout", "description": "Conditionally roll out features with redis.", "authors": ["jamesgolick"] }, @@ -164,7 +164,7 @@ { "name": "Sidekiq", "language": "Ruby", - "repository": "http://mperham.github.com/sidekiq/", + "repository": "http://sidekiq.org/", "description": "Simple, efficient message processing for your 
Rails 3 application.", "authors": ["mperham"] }, @@ -220,7 +220,7 @@ { "name": "HighcoTimelineBundle", "language": "PHP", - "repository": "https://github.com/stephpy/TimelineBundle", + "repository": "https://github.com/stephpy/timeline-bundle", "description": "TimelineBundle is a Bundle which works with Symfony 2.* which provides a timeline for a subject as Facebook can do.", "authors": ["stephpy"] }, @@ -243,7 +243,7 @@ "language": "Javascript", "repository": "http://github.com/chriso/redback", "description": "Higher-level Redis constructs - social graph, full text search, rate limiting, key pairs.", - "authors": ["chris6F"] + "authors": [] }, { "name": "Recurrent", @@ -262,16 +262,15 @@ { "name": "Redis Qi4j EntityStore", "language": "Java", - "url": "http://qi4j.org/latest/extension-es-redis.html", - "repository": "http://github.com/qi4j/qi4j-sdk", + "repository": "https://github.com/qi4j/qi4j-sdk", "description": "Qi4j EntityStore backed by Redis", "authors": ["eskatos"] }, { "name": "Spring Data Redis", "language": "Java", - "url": "http://www.springsource.org/spring-data/redis", - "repository": "http://github.com/SpringSource/spring-data-redis", + "url": "http://projects.spring.io/spring-data-redis/", + "repository": "https://github.com/spring-projects/spring-data-redis", "description": "Spring integration for Redis promoting POJO programming, portability and productivity", "authors": ["costinl"] }, @@ -285,7 +284,7 @@ { "name": "redis-tcl", "language": "Tcl", - "repository" : "http://github.com/bradvoth/redis-tcl", + "repository" : "https://github.com/bradvoth/redis-tcl", "description" : "Tcl library largely copied from the redis test tree, modified for minor bug fixes and expanded pub/sub capabilities", "authors" : ["bradvoth","antirez"] }, @@ -301,7 +300,7 @@ "name": "FastoRedis", "language": "C++", "url": "http://fastoredis.com", - 
"repository": "https://github.com/fasto/fastoredis", + "repository": "https://github.com/fastogt/fastoredis", "description": "Cross-platform Redis, Memcached management tool.", "authors": ["topilski"] }, @@ -325,7 +324,7 @@ "name": "RPQueue", "language": "Python", "url": "https://pypi.python.org/pypi/rpqueue", - "repository": "https://github.com/josiahcrlson/rpqueue", + "repository": "https://github.com/josiahcarlson/rpqueue", "description": "RPQueue offers a prioritized, periodic, and scheduled task system for Python using Redis", "authors": ["josiahcarlson"] }, @@ -356,7 +355,7 @@ { "name": "Hibernate OGM", "language": "Java", - "url": "http://hibernate.org/ogm", + "url": "http://hibernate.org/ogm/", "repository": "https://github.com/hibernate/hibernate-ogm", "description": "Hibernate OGM is the JPA integration for Redis", "authors": ["mp911de", "gunnarmorling"] @@ -364,7 +363,7 @@ { "name": "pyres", "language": "Python", - "repository" : "http://github.com/binarydud/pyres", + "repository" : "https://github.com/binarydud/pyres", "description" : "Python library inspired by Resque for creating background jobs and workers", "authors" : ["binarydud"] }, @@ -373,7 +372,7 @@ "language": "Perl", "repository": "https://github.com/flygoast/Redis-RdbParser", "description": "Redis-RdbParser is a streaming parser for redis RDB database dumps.", - "authors": ["flygoast"] + "authors": [] }, { "name": "Redset", @@ -532,6 +531,6 @@ "language": "Javascript", "repository": "https://github.com/maxbrieiev/promise-redis", "description": "Use any promise library with node_redis.", - "authors": ["maxbrieiev"] + "authors": [] } ] From 5f58ef5da3f5e0f4e77eda6432cd8d899a813c88 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 8 Sep 2015 15:17:42 +0200 Subject: [PATCH 0452/2314] fix: Remove exhaustive list of supported command types Closes #603 --- 
clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 943fc7b7ac..3c30b80045 100644 --- a/clients.json +++ b/clients.json @@ -805,7 +805,7 @@ "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", - "description": "full redis client commands, one redis command one redis function, including STRING, HASH, LIST, SET, ZSET, HLL, PUBSUB, TRANSACTION, SCRIPT, CONNECTION, SERVER, CLUSTER", + "description": "Full Redis client commands, one redis command, one redis function", "authors": [], "active": true }, From 11bb96b1f4aba4c1f84b0d7f872b7572fb9eb2e4 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 8 Sep 2015 20:43:22 +0200 Subject: [PATCH 0453/2314] fix: Pypredis is not actively developed anymore --- clients.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 3c30b80045..4885e4532c 100644 --- a/clients.json +++ b/clients.json @@ -1125,8 +1125,7 @@ "language": "Python", "repository": "https://github.com/pepijndevos/pypredis", "description": "A client focused on arbitrary sharding and parallel pipelining.", - "authors": ["pepijndevos"], - "active": true + "authors": ["pepijndevos"] }, { From 51a8f32144f3a30eedd007d501bbb00846342462 Mon Sep 17 00:00:00 2001 From: thomas Date: Wed, 9 Sep 2015 17:09:00 +0900 Subject: [PATCH 0454/2314] Update clients.json --- clients.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/clients.json b/clients.json index 4885e4532c..a3005b4a4b 100644 --- a/clients.json +++ b/clients.json @@ -1258,5 +1258,12 @@ "repository": "https://github.com/dizzus/RedisKit", "description": "RedisKit is a asynchronious client framework for Redis server, written in Objective-C", "authors": ["dizzus"] + }, + { + "name": "RedisPlugin 
for Phalcon", + "language": "PHP", + "repository": "https://github.com/ienaga/RedisPlugin", + "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", + "authors": ["ienaga"] } ] From 36eb6a6560f00bee967412339857d46b71b004f4 Mon Sep 17 00:00:00 2001 From: Igor Malinovskiy Date: Thu, 10 Sep 2015 19:00:17 -0400 Subject: [PATCH 0455/2314] Add qredisclient to clients.json --- clients.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 4885e4532c..f47db1adfa 100644 --- a/clients.json +++ b/clients.json @@ -1258,5 +1258,15 @@ "repository": "https://github.com/dizzus/RedisKit", "description": "RedisKit is a asynchronious client framework for Redis server, written in Objective-C", "authors": ["dizzus"] - } + }, + + { + "name": "qredisclient", + "language": "C++", + "repository": "https://github.com/uglide/qredisclient", + "description": "Asynchronous Qt-based Redis client with SSL and SSH tunnelling support.", + "authors": ["uglide"], + "active": true + }, + ] From 3a24e3c4c13da23e80e5097585ebea379be8f4fd Mon Sep 17 00:00:00 2001 From: Igor Malinovskiy Date: Thu, 10 Sep 2015 19:01:42 -0400 Subject: [PATCH 0456/2314] Update clients.json --- clients.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients.json b/clients.json index f47db1adfa..a5478d0d28 100644 --- a/clients.json +++ b/clients.json @@ -1267,6 +1267,5 @@ "description": "Asynchronous Qt-based Redis client with SSL and SSH tunnelling support.", "authors": ["uglide"], "active": true - }, - + } ] From 483887e0f6a6ac559cd1a5e398d5c0f42219386f Mon Sep 17 00:00:00 2001 From: Igor Malinovskiy Date: Fri, 11 Sep 2015 09:38:27 -0400 Subject: [PATCH 0457/2314] Update clients.json --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index a5478d0d28..34641f61aa 100644 --- a/clients.json +++ b/clients.json @@ 
-1265,7 +1265,7 @@ "language": "C++", "repository": "https://github.com/uglide/qredisclient", "description": "Asynchronous Qt-based Redis client with SSL and SSH tunnelling support.", - "authors": ["uglide"], + "authors": ["u_glide"], "active": true } ] From 0a13573844bf37c405604977374a2fb15b6ebdc0 Mon Sep 17 00:00:00 2001 From: Chris Galardi Date: Sun, 13 Sep 2015 14:42:50 -0400 Subject: [PATCH 0458/2314] Grammar --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 4459fa190c..830146d33a 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -74,7 +74,7 @@ There are 16384 hash slots in Redis Cluster, and to compute what is the hash slot of a given key, we simply take the CRC16 of the key modulo 16384. -Every node in a Redis Cluster is responsible of a subset of the hash slots, +Every node in a Redis Cluster is responsible for a subset of the hash slots, so for example you may have a cluster with 3 nodes, where: * Node A contains hash slots from 0 to 5500. 
From 29c9db9122e41a98fe6c51dd48ad6d711b2d3d11 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 14 Sep 2015 14:20:34 +0300 Subject: [PATCH 0459/2314] Added COUNT keyword to GEORADIUS and GEORADIUSBYMEMBER --- commands.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands.json b/commands.json index 60267dbf71..02c0a48f04 100644 --- a/commands.json +++ b/commands.json @@ -829,6 +829,7 @@ "optional": true }, { + "command": "COUNT", "name": "count", "type": "integer", "optional": true @@ -876,6 +877,7 @@ "optional": true }, { + "command": "COUNT", "name": "count", "type": "integer", "optional": true From 6e525534dc5517696118937ec1abf124e9682d0a Mon Sep 17 00:00:00 2001 From: xSky Date: Tue, 15 Sep 2015 15:53:46 +0800 Subject: [PATCH 0460/2314] Update clients.json --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 34641f61aa..bafc0e62bd 100644 --- a/clients.json +++ b/clients.json @@ -1268,4 +1268,12 @@ "authors": ["u_glide"], "active": true } + { + "name": "xredis", + "language": "C++", + "repository": "https://github.com/0xsky/xredis", + "description": "Redis C++ client with data slice storage and connection pool support, requires hiredis only", + "authors": ["0xsky"], + "active": true + } ] From 3d38e580446d53508c37066753134869e4baefb6 Mon Sep 17 00:00:00 2001 From: xSky Date: Tue, 15 Sep 2015 16:09:26 +0800 Subject: [PATCH 0461/2314] Update clients.json --- clients.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index bafc0e62bd..65de29041f 100644 --- a/clients.json +++ b/clients.json @@ -1267,7 +1267,8 @@ "description": "Asynchronous Qt-based Redis client with SSL and SSH tunnelling support.", "authors": ["u_glide"], "active": true - } + }, + { "name": "xredis", "language": "C++", From 9412c6395a6d7acdbc349af4ba4e607f89fb990f Mon Sep 17 00:00:00 2001 From: thomas Date: Tue, 15 Sep 2015 22:58:01 +0900 Subject: [PATCH 
0462/2314] add tools.json & rm clients.json --- clients.json | 7 ------- tools.json | 7 +++++++ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/clients.json b/clients.json index a3005b4a4b..4885e4532c 100644 --- a/clients.json +++ b/clients.json @@ -1258,12 +1258,5 @@ "repository": "https://github.com/dizzus/RedisKit", "description": "RedisKit is a asynchronious client framework for Redis server, written in Objective-C", "authors": ["dizzus"] - }, - { - "name": "RedisPlugin for Phalcon", - "language": "PHP", - "repository": "https://github.com/ienaga/RedisPlugin", - "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", - "authors": ["ienaga"] } ] diff --git a/tools.json b/tools.json index d09fd0c834..d1eaf3f4bd 100644 --- a/tools.json +++ b/tools.json @@ -532,5 +532,12 @@ "repository": "https://github.com/maxbrieiev/promise-redis", "description": "Use any promise library with node_redis.", "authors": [] + }, + { + "name": "RedisPlugin for Phalcon", + "language": "PHP", + "repository": "https://github.com/ienaga/RedisPlugin", + "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", + "authors": ["ienaga"] } ] From 83c3600e025bebc65225855d85869fefd63e54d1 Mon Sep 17 00:00:00 2001 From: thomas Date: Tue, 15 Sep 2015 23:55:58 +0900 Subject: [PATCH 0463/2314] add twitter account --- tools.json | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/tools.json b/tools.json index d1eaf3f4bd..f29d392e37 100644 --- a/tools.json +++ b/tools.json @@ -524,7 +524,7 @@ "language": "PHP", "repository": "https://github.com/ienaga/RedisPlugin", "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", - "authors": ["ienaga"] + "authors": ["ienagatoshiyuki"] }, { "name": "promise-redis", @@ -532,12 +532,5 @@ "repository": "https://github.com/maxbrieiev/promise-redis", "description": "Use any 
promise library with node_redis.", "authors": [] - }, - { - "name": "RedisPlugin for Phalcon", - "language": "PHP", - "repository": "https://github.com/ienaga/RedisPlugin", - "description": "RedisPlugin for Phalcon (The correspondence of MySQL sharding.)", - "authors": ["ienaga"] } ] From a3faee0cb8d4ea3ecf4ceed352b0a9431e8e28e4 Mon Sep 17 00:00:00 2001 From: Charles Zhang <43289893@qq.com> Date: Thu, 17 Sep 2015 11:40:54 +0800 Subject: [PATCH 0464/2314] fix some typos. --- commands/cluster-info.md | 2 +- commands/cluster-setslot.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/commands/cluster-info.md b/commands/cluster-info.md index f8138486f9..f8c8b26582 100644 --- a/commands/cluster-info.md +++ b/commands/cluster-info.md @@ -20,7 +20,7 @@ cluster_stats_messages_received:1483968 * `cluster_slots_assigned`: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. * `cluster_slots_ok`: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. * `cluster_slots_pfail`: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. -* `cluster_slots_fail`: Number of hash slots in mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. +* `cluster_slots_fail`: Number of hash slots mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. 
* `cluster_known_nodes`: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. * `cluster_size`: The number of master nodes serving at least one hash slot in the cluster. * `cluster_current_epoch`: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. diff --git a/commands/cluster-setslot.md b/commands/cluster-setslot.md index 55f9783e28..6364f7e597 100644 --- a/commands/cluster-setslot.md +++ b/commands/cluster-setslot.md @@ -1,7 +1,7 @@ `CLUSTER SETSLOT` is responsible of changing the state of an hash slot in the receiving node in different ways. It can, depending on the subcommand used: -1. `MIGRATING` subcommand: Set a hash slot in *importing* state. -2. `IMPORTING` subcommand: Set a hash slot in *migrating* state. +1. `MIGRATING` subcommand: Set a hash slot in *migrating* state. +2. `IMPORTING` subcommand: Set a hash slot in *importing* state. 3. `STABLE` subcommand: Clear any importing / migrating state from hash slot. 4. `NODE` subcommand: Bind the hash slot to a different node. 
From 9e09e93d84af5a783ef507906656a91ca63311a5 Mon Sep 17 00:00:00 2001 From: Hisham Bin Ateya Date: Thu, 17 Sep 2015 11:03:48 +0300 Subject: [PATCH 0465/2314] Change vRedis twitter handle --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 65de29041f..b15aa1279e 100644 --- a/clients.json +++ b/clients.json @@ -1151,7 +1151,7 @@ "language": "VB", "repository": "https://github.com/hishamco/vRedis", "description": "Redis client using VB.NET.", - "authors": ["hishamco"], + "authors": ["hishambinateya"], "active": true }, From af4f0240b55b773ec3b6fe9622aa2bda739d99cb Mon Sep 17 00:00:00 2001 From: Simon Ninon Date: Thu, 17 Sep 2015 00:31:37 +0200 Subject: [PATCH 0466/2314] add cpp_redis to clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 65de29041f..fc052d429f 100644 --- a/clients.json +++ b/clients.json @@ -1276,5 +1276,14 @@ "description": "Redis C++ client with data slice storage and connection pool support, requires hiredis only", "authors": ["0xsky"], "active": true + }, + + { + "name": "cpp_redis", + "language": "C++", + "repository": "https://github.com/cylix/cpp_redis", + "description": "Modern C++11 Redis client based on boost::asio", + "authors": ["simon_ninon"], + "active": true } ] From 894413e010c7dfb2c4fe1d731aad836feb36e790 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 21 Sep 2015 09:56:12 +0200 Subject: [PATCH 0467/2314] fix: Remove superfluous dot --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index f1124dea0a..0dc5fdada2 100644 --- a/commands.json +++ b/commands.json @@ -2948,7 +2948,7 @@ }, "SCAN": { "summary": "Incrementally iterate the keys space", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection..", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.", "arguments": [ { "name": "cursor", From c7ed08b01e332f8bdca9d608599ebf235791dea1 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Tue, 22 Sep 2015 01:59:16 +1000 Subject: [PATCH 0468/2314] Add lredis lua redis client --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index f3f9dd4ef6..0ee8ce7b7a 100644 --- a/clients.json +++ b/clients.json @@ -286,6 +286,15 @@ "active": true }, + { + "name": "lredis", + "language": "Lua", + "repository": "https://github.com/daurnimator/lredis", + "description": "Redis library for Lua", + "authors": ["daurnimator"], + "active": true + }, + { "name": "Redis", "language": "Perl", From 5a64d560e7f75b81e74551d3931eb51a81a2a595 Mon Sep 17 00:00:00 2001 From: Sam Cook Date: Tue, 29 Sep 2015 12:31:21 +0100 Subject: [PATCH 0469/2314] Add RedLock.net to distlock implementation list --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index f159684ce5..14d64f6c3b 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -32,6 +32,7 @@ already available that can be used for reference. * [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). * [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). * [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). +* [RedLock.net](https://github.com/samcook/RedLock.net) (C#/.NET implementation). Includes async and lock extension support. * [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. 
Safety and Liveness guarantees From 32d93b2261b5db1d3a6116454fd644cade7fa370 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 29 Sep 2015 17:08:07 +0300 Subject: [PATCH 0470/2314] debug library no longer available See https://github.com/antirez/redis/commit/30278061cc834b4073b004cb1a2bfb0f195734f7#diff-792ebfd4581312e7344b1e48adad7f10R633 --- commands/eval.md | 1 - 1 file changed, 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index b5fe028965..1a0f0de80e 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -508,7 +508,6 @@ The Redis Lua interpreter loads the following Lua libraries: * `table` lib. * `string` lib. * `math` lib. -* `debug` lib. * `struct` lib. * `cjson` lib. * `cmsgpack` lib. From 985de215bd1c0698909109e4ab3536ab705d5ce8 Mon Sep 17 00:00:00 2001 From: Vladimir Korenev Date: Mon, 5 Oct 2015 10:27:31 +0300 Subject: [PATCH 0471/2314] Add missing @return keywords --- commands/debug-object.md | 2 ++ commands/debug-segfault.md | 2 ++ commands/pexpire.md | 2 ++ 3 files changed, 6 insertions(+) diff --git a/commands/debug-object.md b/commands/debug-object.md index 4c8c1bbf27..14550c618e 100644 --- a/commands/debug-object.md +++ b/commands/debug-object.md @@ -1,4 +1,6 @@ `DEBUG OBJECT` is a debugging command that should not be used by clients. Check the `OBJECT` command instead. +@return + @simple-string-reply diff --git a/commands/debug-segfault.md b/commands/debug-segfault.md index 24ac5f4ce6..ed51e9bc21 100644 --- a/commands/debug-segfault.md +++ b/commands/debug-segfault.md @@ -1,4 +1,6 @@ `DEBUG SEGFAULT` performs an invalid memory access that crashes Redis. It is used to simulate bugs during the development. 
+@return + @simple-string-reply diff --git a/commands/pexpire.md b/commands/pexpire.md index b01c937931..cadaaff2b6 100644 --- a/commands/pexpire.md +++ b/commands/pexpire.md @@ -1,6 +1,8 @@ This command works exactly like `EXPIRE` but the time to live of the key is specified in milliseconds instead of seconds. +@return + @integer-reply, specifically: * `1` if the timeout was set. From 83da11e680afd9f051452dc5f94e5eab7de398f4 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 10 Oct 2015 00:33:25 +0300 Subject: [PATCH 0472/2314] REPLACE is only available from v3 --- commands/restore.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/restore.md b/commands/restore.md index cfb0fb2c9b..b6ff886d13 100644 --- a/commands/restore.md +++ b/commands/restore.md @@ -5,7 +5,7 @@ If `ttl` is 0 the key is created without any expire, otherwise the specified expire time (in milliseconds) is set. `RESTORE` will return a "Target key name is busy" error when `key` already -exists unless you use the `REPLACE` modifier. +exists unless you use the `REPLACE` modifier (Redis 3.0 or greater). `RESTORE` checks the RDB version and data checksum. If they don't match an error is returned. 
From 71cce1627fa76d48d1c2b953031bfca67c76d46b Mon Sep 17 00:00:00 2001 From: Adrien Moreau Date: Sat, 10 Oct 2015 06:45:26 +0000 Subject: [PATCH 0473/2314] Add eredis_cluster client --- clients.json | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index f3f9dd4ef6..347ddf8ace 100644 --- a/clients.json +++ b/clients.json @@ -53,7 +53,14 @@ "recommended": true, "active": true }, - + { + "name": "eredis_cluster", + "language": "Erlang", + "repository": "https://github.com/adrienmo/eredis_cluster", + "description": "Eredis wrapper providing cluster support and connection pooling", + "authors": ["adrienmo"], + "active": true + }, { "name": "sharded_eredis", "language": "Erlang", From e69e082e4137f8f2a99a6a4df67ff8a6c23138b3 Mon Sep 17 00:00:00 2001 From: David Thomson Date: Tue, 13 Oct 2015 07:06:27 +0100 Subject: [PATCH 0474/2314] Fix ordering of reshard command for scripting --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 830146d33a..cdcbffc312 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -574,7 +574,7 @@ Reshardings can be performed automatically without the need to manually enter the parameters in an interactive way. This is possible using a command line like the following: - ./redis-trib.rb reshard : --from --to --slots --yes + ./redis-trib.rb reshard --from --to --slots --yes : This allows to build some automatism if you are likely to reshard often, however currently there is no way for `redis-trib` to automatically From 6155ec0f620a2b0e46bf275d41368841942bb3e6 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:26:50 +0200 Subject: [PATCH 0475/2314] Added documentation about secondary indexes. 
--- topics/indexes.md | 538 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 538 insertions(+) create mode 100644 topics/indexes.md diff --git a/topics/indexes.md b/topics/indexes.md new file mode 100644 index 0000000000..df232593a8 --- /dev/null +++ b/topics/indexes.md @@ -0,0 +1,538 @@ +Secondary indexing with Redis +=== + +While Redis not exactly a key-value store, since values can be complex data structures, it has an extrenal key-value shell, since at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers primary key access. However since Redis is a data structures server, certain data structures can be used for indexing, in order to create secondary indexes of different kindes, including secondary indexes and composite (multi-column) indexes. + +This document explains how it is possible to create indexes in Redis using the following data structures: + +* Sorted sets to create secondary indexes by ID or other numerical fields. +* Sorted sets with lexicographical ranges for creating more advanced secondary indexes and composite indexes. +* Sets for creating random indexes. +* Lists for creating simple iterable indexes. + +Implementing and maintaining indexes with Redis is an advanced topic, so most +users that need to perform complex queries on data should understand if they +are better served by a relational store. However often, especially in caching +scenarios, there is the explicit need to store indexed data into Redis in order +to speedup common queries which require indexes. + +Simple numerical indexes with sorted sets +=== + +The simplest secondary index you can create with Redis is by using a +sorted set data type, which is a data structure representing a set of +elements ordered by a floating point number which is the *score* of +each element. Elements are ordered from the smallest to the highest score. 
+ +Since the score is a double precision float, indexes you can build with +vanilla sorted sets are limited to things were the indexing field is a number +within a given specific range. + +The two commands to build those kinda of indexes are `ZADD` and +`ZRANGEBYSCORE` to respectively add items and retrive items within a +specified range. + +For instance, it is possible to index a set of names by their +age by adding element to a sorted set. The element will be the name of the +person and the score will be the age. + + ZADD myindex 25 Manuel + ZADD myindex 18 Anna + ZADD myindex 35 Jon + ZADD myindex 67 Helen + +In order to retrieve all the persons with an age between 20 and 40: + + ZRANGEBYSCORE myindex 20 40 + 1) "Manuel" + 2) "Jon" + +By using the **WITHSCORES** option of `ZRANGEBYSCORE` it is also possible +to obtain the scores associated with the returned elements. + +The `ZCOUNT` command can be used in order to retrieve the number of elements +between a given range without actually fetching the elements which is also +useful, especially given the fact the operation has logarithmic time +complexity regardless of the size of the range. + +Ranges can be inclusive or exclusive, please refer to the `ZRANGEBYSCORE` +command documentation for more information. + +**Note**: Using the `ZREVRANGEBYSCORE` it is possible to query range in +reversed order, which is often useful when data is indexed in a given +direction (ascending or descending) but we want to retrieve information +in the other way. + +Using objects IDs as associated values +--- + +In the above example we associated names to ages. However in general we +may want to index some field of an object to some object. Instead of +using as the sorted set value directly the data associated with the +indexed field, it is possible to use an ID which refers to some object +stored at a different key. 
+ +For example I may have Redis hashes, one per key, referring to hashes +representing users: + + HMSET user:1 username id 1 antirez ctime 1444809424 age 38 + HMSET user:2 username id 2 maria ctime 1444808132 age 42 + HMSET user:3 username id 3 jballard ctime 1443246218 age 33 + +If I want to create an index in order to query users by their age, I +could do: + + ZADD user.age.index 38 1 + ZADD user.age.index 42 2 + ZADD user.age.index 33 3 + +This time the value associated with the score in the sorted set is the +ID of the object. So once I query the index with `ZRANGEBYSCORE` I'll +also retrieve the informations I need with `HGETALL` or similar commands. + +In the next examples we'll always use IDs as values associated with the +index, since this is usually the more sounding design. + +Updating simple sorted set indexes +--- + +Often we index things which change during time. For example in the above +example, the age of the user changes every year. In such a case it would +make sense to use the birth date as index instead of the age itself, +but there are other cases where we simple want some field to change from +time to time, and the index to reflect this change. + +The `ZADD` command makes updating simple indexes a very trivial operation +since re-adding back an element with a different score and the same value +will simply update the score and move the element at the right position, +so if the user *antirez* turned 39 years old, in order to update the +data in the hash representing the user, and in the index as well, we need +the following two commands: + + HSET user:1 age 39 + ZADD user.age.index 39 1 + +The operation may be wrapped in a `MULTI`/`EXEC` transaction in order to +make sure both fields are updated or none. + +Turning multi dimensional data into linear data +--- + +Indexes created with sorted sets are able to index only a single numerical +value. 
Because of this you may think it is impossible to index something +which has multiple dimensions using this kind of indexes, but actually this +is not always true. If you can efficiently represent something +multi-dimensional in a linear way, they it is often possible to use a simple +sorted set for indexing. + +For example the [Redis geo indexing API](/commands/geoadd) users a sorted +set to index places by latitude and longitude using a technique called +[Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score +represents alternating bits of longitude and latitude, so that we map the +linear score of a sorted set to many small *squares* in the earth surface. +By doing an 8+1 style center and neighborhood search it is possible to +retrieve elements by radius. + +Limits of the score +--- + +Sorted set elements scores are double precision integers. It means that +they can represent different decimal or integer values with a different +errors. However what is interesting for indexing is that the score is +always able to represent without any error numbers between -9007199254740992 +and 9007199254740992, which is `-/+ 2^53`. + +When representing much larger numbers, you need a different form if indexing +that is able to index numbers at any precision, called a lexicographical +index. + +Lexicographical indexes +=== + +Redis sorted sets have an interesting property. When elements are added +with the same score, they are sorted lexicographically, comparing the +strings as binary data with the `memcmp()` function. + +Moreover, there are commands such as `ZRANGEBYLEX` and `ZLEXCOUNT` that +are able to query and count ranges in a lexicographically fashion. + +This feature is basically equivalent to a `b-tree` data structure which +is often used in order to implement indexes with traditional databases. +As you can guess, because of this, it is possible to use this Redis data +structure in order to implement pretty fancy indexes. 
+ +Before to dive into using lexicographical indexes, let's check how +sorted sets behave in this special mode of operations. Since we need to +add elements with the same score, we'll always use the special score of +zero. + + ZADD myindex 0 baaa + ZADD myindex 0 abbb + ZADD myindex 0 aaaa + ZADD myindex 0 bbbb + +Fetching all the elements from the sorted set immediately reveals that they +are ordered lexicographically. + + ZRANGE myindex 0 -1 + 1) "aaaa" + 2) "abbb" + 3) "baaa" + 4) "bbbb" + +Now we can use `ZRANGEBYLEX` in order to perform range queries. + + ZRANGEBYLEX myindex [a (b + 1) "aaaa" + 2) "abbb" + +Note that in the range queries I prefixed my min and max element with +`[` and `(`. This prefixes are mandatory, and they specify if the element +we specify for the range is inclusive or exclusive. So the range `[a (b` means give me all the elements lexicographically between `a` inclusive and `b` exclusive, which are all the elements starting with `a`. + +There are also two more special characters indicating the infinitely negative +string and the infinitely positive string, which are `-` and `+`. + + ZRANGEBYLEX myindex [b + + 1) "baaa" + 2) "bbbb" + +That's it basically. Let's see how to use these features to build indexes. + +A first example: completion +--- + +An interesting application of indexing is completion, similar to what happens +in a search engine when you start to type your search query: it will +anticipate what you are likely typing, providing common queries that +start with the same characters. + +A naive approach to completion is to just add every single query we +get from the user into the index. For example if the user search `banana` +we'll just do: + + ZADD myindex 0 banana + +And so forth for each search query ever encountered. Then when we want to +complete the user query, we do a very simple query using `ZRANGEBYLEX`, like +the following. Imagine the user is typing "bit", and we want to complete the +query. 
We send a command like that: + + ZLEXRANGE myindex "[bit" "[bit\xff" + +Basically we create a range using the string the user is typing right now +as start, and the same sting plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. In this way we get all the strings that start for the string the user is typing. + +Note that we don't want too many items returned, so we may use the **LIMIT** option in order to reduce the number of results. + +Adding frequency into the mix +--- + +The above approach is a bit naive, because all the user queries are the same +in this way. In a real system we want to complete strings accordingly to their +frequency: very popular queries will be proposed with an higher probability +compared to query strings searched very rarely. + +In order to implement something which depends on the frequency, and at the +same time automatically adapts to future inputs and purges query strings that +are no longer popular, we can use a very simple *streaming algorithm*. + +To start, we modify our index in order to don't have just the search term, +but also the frequency the term is associated with. So instead of just adding +`banana` we add `banana:1`, where 1 is the frequency. + + ZADD myindex 0 banana:1 + +We also need logic in order to increment the index if the search term +already exists in the index, so what we'll actually do is something like +that: + + ZRANGEBYLEX myindex "[banana:" + LIMIT 1 1 + 1) "banana:1" + +This will return the single entry of `banana` if it exists. Then we +can increment the associated frequency and send the following two +commands: + + ZREM myindex 0 banana:1 + ZADD myindex 0 banana:2 + +Note that because it is possible that there are concurrent updates, the +above three commands should be send via a [Lua script](/commands/eval) +instead, so that the Lua script will atomically get the old count and +re-add the item with incremented score. 
+ +So the result will be that, every time an user searches for `banana` we'll +get our entry updated. + +There is more: our goal is to just have items searched very frequently. +So we need some form of purging. So, when we actually query the index +in order to complete the user request, we may see something like that: + + ZRANGEBYLEX myindex "[banana:" + LIMIT 1 10 + 1) "banana:123" + 2) "banahhh:1" + 3) "banned user:49" + 4) "banning:89" + +Apparently nobody searches for "banahhh", for example, but the query was +performed a single time, so we end presenting it to the user. + +So what we do is, out of the returned items, we pick a random one, divide +its score by two, and re-add it with half to score. However if the score +was already "1", we simply remove the item from the list. You can use +much more advanced systems, but the idea is that the index in the long run +will contain top queries, and if top queries will change over the time +it will adapt itself. + +A refinement to this algorithm is to pick entries in the list according to +their weight: the higher the score, the less likely it is picked +in order to halve its score, or evict it. + +Normalizing strings for case and accents +--- + +In the completion examples we always used lowercase strings. However +reality is much more complex than that: languages have capitalized names, +accents, and so forth. + +One simple way do deal with this issues is to actually normalize the +string the user searches. Whatever the user searches for "Banana", +"BANANA" or Ba'nana" we may always turn it into "banana". + +However sometimes we could like to present the user with the original +item typed, even if we normalize the string for indexing. 
In order to +do this, what we do is to change the format of the index so that instead +of just storing `term:frequency` we store `normalized:frequency:original` +like in the following example: + + ZADD myindex 0 banana:273:Banana + +Basically we add another field that we'll extract and use only for +visualization. Ranges will always be computed using the normalized strings +instead. This is a common trick which has multiple applications. + +Adding auxiliary informations in the index +--- + +When using sorted set in a direct way, we have two different attributes +for each object: the score, which we use as an index, and an associated +value. When using lexicographical indexes instead, the score is always +set to 0 and basically not used at all. We are left with a single string, +which is the element itself. + +Like we did in the previous completion examples, we are still able to +store associated data using separators. For example we used the colon in +order to add the frequency and the original word for completion. + +In general we can add any kind of associated value to our primary key. +In order to use a lexicographic index to implement a simple key-value store +we just store the entry as `key:value`: + + ZADD myindex 0 mykey:myvalue + +And search for the key with: + + ZRANGEBYLEX myindex mykey: + LIMIT 1 1 + 1) "mykey:myvalue" + +Then we just get the part after the colon to retrieve the value. +However a problem to solve in this case is collisions. The colon character +may be part of the key itself, so it must be chosen in order to never +collide with the key we add. + +Since lexicographical ranges in Redis are binary safe you can use any +byte or any sequence of bytes. However if you receive untrusted user +input, it is better to use some form of escaping in order to guarantee +that the separator will never happen to be part of the key. 
+ +For example if you use two null bytes as separator `"\0\0"`, you may +want to always escape null bytes into two bytes sequences in your strings. + +Numerical padding +--- + +Lexicographical indexes may look like good only when the problem at hand +is to index strings. Actually it is very simple to use this kind of index +in order to index arbitrary precision numbers. + +In the ASCII character set, digits appear in the order from 0 to 9, so +if we left-pad numbers with leading zeroes, the result is that comparing +them as strings will order them by their numerical value. + + ZADD myindex 0 00324823481:foo + ZADD myindex 0 12838349234:bar + ZADD myindex 0 00000000111:zap + + ZRANGE myindex 0 -1 + 1) "00000000111:zap" + 2) "00324823481:foo" + 3) "12838349234:bar" + +We effectively created an index using a numerical field which can be as +big as we want. This also works with floating point numbers of any precision +by making sure we left pad the numerical part with leading zeroes and the +decimal part with trailing zeroes like in the following list of numbers: + + 01000000000000.11000000000000 + 01000000000000.02200000000000 + 00000002121241.34893482930000 + 00999999999999.00000000000000 + +Using numbers in binary form +--- + +Storing numbers in decimal may use too much memory. An alternative approach +is just to store numbers, for example 128 bit integers, directly in their +binary form. However for this to work, you need to store the numbers in +*big endian format*, so that the most significant bytes are stored before +the least significant bytes. This way when Redis compares the strings with +`memcmp()`, it will effectively sort the numbers by their value. + +However data stored in binary format is less observable for debugging, harder +to parse and export. So it is definitely a trade off. + +Composite indexes +=== + +So far we explored ways to index single fields. However we all now that +SQL stores are able to create indexes using multiple fields. 
For example +I may index products in a very large store by room number and price. + +I need to run queries in order to retrieve all the products in a given +room having a given price range. What I can do is to index each product +in the following way: + + ZADD myindex 0 0056:0028.44:90 + ZADD myindex 0 0034:0011.00:832 + +Here the fields are `room:price:product_id`. I used just four digits padding +in the example for simplicity. The auxiliary data (the product ID) does not +need any padding. + +With an index like that, to get all the products in room 56 having a price +between 10 and 30 dollars is very easy. We can just run the following +command: + + ZRANGEBYLEX myindex [0056:0010.00 [0056:0030.00 + +The above is called a composed index. Its effectiveness depends on the +order of the fields and the queries I want to run. For example the above +index cannot be used efficiently in order to get all the products having +a specific prince range regardless of the room number. However I can use +the primary key in order to run queries regardless of the prince, like +*give me all the products in room 44*. + +Composite indexes are very powerful, and are used in traditional stores +in order to optimize complex queries. In Redis they could be useful both +to perform a very fast in-memory Redis index of something stored into +a traditional data store, or in order to directly index Redis data. + +Updating lexicographical indexes +=== + +The value of the index in a lexicographical index can get pretty fancy +and hard or slow to rebuild from what we store about the object. So one +approach to simplify the handling of the index, at the cost of using more +memory, is to also take alongside to the sorted set representing the index +an hash mapping the object ID to the current index value. 
+ +So for example, when we index we also add to an hash: + + MULTI + ZADD myindex 0 0056:0028.44:90 + HSET index.content 90 0056:0028.44:90 + EXEC + +This may not be always needed, but simplifies the operations of updating +the index. In order to remove the old information we indexed for the object +ID 90, regardless of the *current* fields values of the object, we just +have to retrieve the hash value by object id and `ZREM` it in the sorted +set view. + +Representing and querying graphs using an hexastore +=== + +One cool thing about composite indexes is that they are handy in order +to represent graphs, using a data structure which is called +[Hexastore](http://www.vldb.org/pvldb/1/1453965.pdf). + +The hexastore provides a representation for the relations between objects, +formed by a *subject*, a *predicate* and an *object*. +A simple relation between objects could be: + + antirez is-friend-of mcollina + +In order to represent this relation I can store the following element +in my lexicographical index: + + ZADD myindex 0 spo:antirez:is-friend-of:mcollina + +Note that I prefixed my item with the string **spo**. It means that +the item represents a subject,predicate,object relation. + +In can add more 5 items for the same relation, but in a different order: + + ZADD myindex 0 sop:antirez:mcollina:is-friend-of + ZADD myindex 0 ops:mcollina:is-friend-of:antirez + ZADD myindex 0 osp:mcollina:antirez:is-friend-of + ZADD myindex 0 pso:is-friend-of:antirez:mcollina + ZADD myindex 0 pos:is-friend-of:mcollina:antirez + +Now things start to be interesting, and I can query the graph for many +interesting things. For example, what are all the people antirez +*is friend to*? + + ZRANGEBYLEX myindex "[sop:antirez:" "[sop:antirez:\xff" + +Or, what are all the relationships `antirez` and` mcollina` have where +the first is the subject and the second is the object? 
+ + ZRANGEBYLEX myindex "[sop:antirez:mcollina:" "[sop:antirez:mcollina:\xff" + +By combining different queries, I can ask fancy questions. For example: +*What are all my friends that, like beer, live in Barcellona, and mcollina consider friends as well?* +To get this information I start with an `spo` query to find all the people +I'm friend with. Than for each result I get I perform an `spo` query +to check if they like beer, removing the ones for which I can't find +this relation. I do it again to filter by city. Finally I perform an `ops` +query to find, of the list I obtained, who is considered friend by +mcollina. + +Make sure to check [Matteo Collina's slides about Levelgraph](http://nodejsconfit.levelgraph.io/) in order to better understand these ideas. + +Non range indexes +=== + +So far we checked indexes which are useful to query by range or by single +item. However other Redis data structures such as Sets or Lists can be used +in order to build indexes working in different ways. + +For instance I can index object IDs into a Set data type in order to use +the *get random elements* operation via `SRANDMEMBER` in order to retrieve +a set of random objects. Sets can also be used to check for existence when +all I need is to test if a given item exists or not or has a single boolean +property or not. + +Similarly lists can be used in order to index items into a fixed order, +so I can add all my items into a bit list and rotate the list with +RPOPLPUSH using the same list as source and destination. This is useful +when I want to process a given set of items again and again forever. Think +at an RSS feed system that need to refresh the local copy. + +Another popular index often used for Redis is a **capped list**, where items +are added with `LPUSH` and trimmed `LTRIM`, in order to create a view +with just the latest N items encountered. 
+ +Index inconsistencies +=== + +Keeping the index updated may be challenging, in the course of months +or years it is possible that inconsistency are added because of software +bugs, network partitions or other events. + +Different strategies could be used. If the index data is outside Redis +*read reapir* can be a solution, where data is fixed in a lazy way when +it is requested. When we index data which is stored in Redis itself +the `SCAN` family of commands can be used in order to very, update or +rebuild the index from scratch. From ca5a4a8939d69997a35c1851788ebdb134571a9f Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:29:26 +0200 Subject: [PATCH 0476/2314] indexes.md: a couple of spelling fixes. --- topics/indexes.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index df232593a8..a6b7f0af97 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -1,7 +1,7 @@ Secondary indexing with Redis === -While Redis not exactly a key-value store, since values can be complex data structures, it has an extrenal key-value shell, since at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers primary key access. However since Redis is a data structures server, certain data structures can be used for indexing, in order to create secondary indexes of different kindes, including secondary indexes and composite (multi-column) indexes. +While Redis not exactly a key-value store, since values can be complex data structures, it has an extrenal key-value shell, since at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers primary key access. However since Redis is a data structures server, certain data structures can be used for indexing, in order to create secondary indexes of different kinds, including secondary indexes and composite (multi-column) indexes. 
This document explains how it is possible to create indexes in Redis using the following data structures: @@ -29,7 +29,7 @@ vanilla sorted sets are limited to things were the indexing field is a number within a given specific range. The two commands to build those kinda of indexes are `ZADD` and -`ZRANGEBYSCORE` to respectively add items and retrive items within a +`ZRANGEBYSCORE` to respectively add items and retrieve items within a specified range. For instance, it is possible to index a set of names by their @@ -480,7 +480,7 @@ In can add more 5 items for the same relation, but in a different order: ZADD myindex 0 pos:is-friend-of:mcollina:antirez Now things start to be interesting, and I can query the graph for many -interesting things. For example, what are all the people antirez +interesting things. For example, what are all the people `antirez` *is friend to*? ZRANGEBYLEX myindex "[sop:antirez:" "[sop:antirez:\xff" @@ -524,7 +524,7 @@ Another popular index often used for Redis is a **capped list**, where items are added with `LPUSH` and trimmed `LTRIM`, in order to create a view with just the latest N items encountered. -Index inconsistencies +Index inconsistency === Keeping the index updated may be challenging, in the course of months From 965a62644ccde0d9221a87bf27d6cca23c6666ef Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:33:12 +0200 Subject: [PATCH 0477/2314] Matteo twitter handle fixed. --- topics/indexes.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index a6b7f0af97..0a8837f318 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -461,23 +461,23 @@ The hexastore provides a representation for the relations between objects, formed by a *subject*, a *predicate* and an *object*. 
A simple relation between objects could be: - antirez is-friend-of mcollina + antirez is-friend-of matteocollina In order to represent this relation I can store the following element in my lexicographical index: - ZADD myindex 0 spo:antirez:is-friend-of:mcollina + ZADD myindex 0 spo:antirez:is-friend-of:matteocollina Note that I prefixed my item with the string **spo**. It means that the item represents a subject,predicate,object relation. In can add more 5 items for the same relation, but in a different order: - ZADD myindex 0 sop:antirez:mcollina:is-friend-of - ZADD myindex 0 ops:mcollina:is-friend-of:antirez - ZADD myindex 0 osp:mcollina:antirez:is-friend-of - ZADD myindex 0 pso:is-friend-of:antirez:mcollina - ZADD myindex 0 pos:is-friend-of:mcollina:antirez + ZADD myindex 0 sop:antirez:matteocollina:is-friend-of + ZADD myindex 0 ops:matteocollina:is-friend-of:antirez + ZADD myindex 0 osp:matteocollina:antirez:is-friend-of + ZADD myindex 0 pso:is-friend-of:antirez:matteocollina + ZADD myindex 0 pos:is-friend-of:matteocollina:antirez Now things start to be interesting, and I can query the graph for many interesting things. For example, what are all the people `antirez` @@ -485,19 +485,19 @@ interesting things. For example, what are all the people `antirez` ZRANGEBYLEX myindex "[sop:antirez:" "[sop:antirez:\xff" -Or, what are all the relationships `antirez` and` mcollina` have where +Or, what are all the relationships `antirez` and` matteocollina` have where the first is the subject and the second is the object? - ZRANGEBYLEX myindex "[sop:antirez:mcollina:" "[sop:antirez:mcollina:\xff" + ZRANGEBYLEX myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" By combining different queries, I can ask fancy questions. 
For example: -*What are all my friends that, like beer, live in Barcellona, and mcollina consider friends as well?* +*What are all my friends that, like beer, live in Barcellona, and matteocollina consider friends as well?* To get this information I start with an `spo` query to find all the people I'm friend with. Than for each result I get I perform an `spo` query to check if they like beer, removing the ones for which I can't find this relation. I do it again to filter by city. Finally I perform an `ops` query to find, of the list I obtained, who is considered friend by -mcollina. +matteocollina. Make sure to check [Matteo Collina's slides about Levelgraph](http://nodejsconfit.levelgraph.io/) in order to better understand these ideas. From f9497376229107d0cde7fb8769a20d569c4ff458 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:37:53 +0200 Subject: [PATCH 0478/2314] Small form fixes. --- topics/indexes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 0a8837f318..ef3a6a7237 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -266,7 +266,7 @@ So the result will be that, every time an user searches for `banana` we'll get our entry updated. There is more: our goal is to just have items searched very frequently. -So we need some form of purging. So, when we actually query the index +So we need some form of purging. When we actually query the index in order to complete the user request, we may see something like that: ZRANGEBYLEX myindex "[banana:" + LIMIT 1 10 @@ -278,8 +278,8 @@ in order to complete the user request, we may see something like that: Apparently nobody searches for "banahhh", for example, but the query was performed a single time, so we end presenting it to the user. -So what we do is, out of the returned items, we pick a random one, divide -its score by two, and re-add it with half to score. 
However if the score +What we could do is, out of the returned items, we pick a random one, divide +its score by two, and re-add it with half the score. However if the score was already "1", we simply remove the item from the list. You can use much more advanced systems, but the idea is that the index in the long run will contain top queries, and if top queries will change over the time From 99c95f0291442ea40ab9843dc1aa2bcfd7d8f133 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:39:26 +0200 Subject: [PATCH 0479/2314] Don't repeat specify. --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index ef3a6a7237..f0f4c0f4f9 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -188,7 +188,7 @@ Now we can use `ZRANGEBYLEX` in order to perform range queries. Note that in the range queries I prefixed my min and max element with `[` and `(`. This prefixes are mandatory, and they specify if the element -we specify for the range is inclusive or exclusive. So the range `[a (b` means give me all the elements lexicographically between `a` inclusive and `b` exclusive, which are all the elements starting with `a`. +of the range is inclusive or exclusive. So the range `[a (b` means give me all the elements lexicographically between `a` inclusive and `b` exclusive, which are all the elements starting with `a`. There are also two more special characters indicating the infinitely negative string and the infinitely positive string, which are `-` and `+`. From e1099797719b5dba3a21ef5577b76d75f64dc028 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:41:14 +0200 Subject: [PATCH 0480/2314] Markdown fix. --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index f0f4c0f4f9..683438e468 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -485,7 +485,7 @@ interesting things. 
For example, what are all the people `antirez` ZRANGEBYLEX myindex "[sop:antirez:" "[sop:antirez:\xff" -Or, what are all the relationships `antirez` and` matteocollina` have where +Or, what are all the relationships `antirez` and `matteocollina` have where the first is the subject and the second is the object? ZRANGEBYLEX myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" From 3cf685dcaf304ec474a88cc039c8d98356f74c82 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:43:37 +0200 Subject: [PATCH 0481/2314] Grammar fix. --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index 683438e468..bbe3518380 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -1,7 +1,7 @@ Secondary indexing with Redis === -While Redis not exactly a key-value store, since values can be complex data structures, it has an extrenal key-value shell, since at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers primary key access. However since Redis is a data structures server, certain data structures can be used for indexing, in order to create secondary indexes of different kinds, including secondary indexes and composite (multi-column) indexes. +While Redis is not exactly a key-value store, since values can be complex data structures, it has an extrenal key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers primary key access. However since Redis is a data structures server, certain data structures can be used for indexing, in order to create secondary indexes of different kinds, including secondary indexes and composite (multi-column) indexes. 
This document explains how it is possible to create indexes in Redis using the following data structures: From d868c8eb10f90b729a0cc868506d06565f4075db Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:44:40 +0200 Subject: [PATCH 0482/2314] Fix command name. --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index bbe3518380..ea5e120a28 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -218,7 +218,7 @@ complete the user query, we do a very simple query using `ZRANGEBYLEX`, like the following. Imagine the user is typing "bit", and we want to complete the query. We send a command like that: - ZLEXRANGE myindex "[bit" "[bit\xff" + ZRANGEBYLEX myindex "[bit" "[bit\xff" Basically we create a range using the string the user is typing right now as start, and the same sting plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. In this way we get all the strings that start for the string the user is typing. From f6c708d77420bac6b1b4466c30b6f0312773dbe7 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 12:48:02 +0200 Subject: [PATCH 0483/2314] Simpler and better frequency algorithm. --- topics/indexes.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index ea5e120a28..4a7c1e7052 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -278,16 +278,16 @@ in order to complete the user request, we may see something like that: Apparently nobody searches for "banahhh", for example, but the query was performed a single time, so we end presenting it to the user. -What we could do is, out of the returned items, we pick a random one, divide -its score by two, and re-add it with half the score. However if the score -was already "1", we simply remove the item from the list. 
You can use -much more advanced systems, but the idea is that the index in the long run -will contain top queries, and if top queries will change over the time -it will adapt itself. +What we could do is, out of the returned items, we pick a random one, +decrement its score by one, and re-add it with the new score. +However if the score reaches 0, we simply remove the item from the list. +You can use much more advanced systems, but the idea is that the index in +the long run will contain top queries, and if top queries will change over +the time it will adapt itself. A refinement to this algorithm is to pick entries in the list according to -their weight: the higher the score, the less likely it is picked -in order to halve its score, or evict it. +their weight: the higher the score, the less likely entries are picked +in order to decrement its score, or evict them. Normalizing strings for case and accents --- From d3b5e4b15892beb46f4f0c4c8ba278f42e05cbcb Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 17:29:51 +0200 Subject: [PATCH 0484/2314] Indexes doc improved for form and content. --- topics/indexes.md | 186 ++++++++++++++++++++++++++-------------------- 1 file changed, 105 insertions(+), 81 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 4a7c1e7052..ab744dcc95 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -1,38 +1,37 @@ Secondary indexing with Redis === -While Redis is not exactly a key-value store, since values can be complex data structures, it has an extrenal key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers primary key access. However since Redis is a data structures server, certain data structures can be used for indexing, in order to create secondary indexes of different kinds, including secondary indexes and composite (multi-column) indexes. +Redis is not exactly a key-value store, since values can be complex data structures. 
However it has an extrenal key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. This document explains how it is possible to create indexes in Redis using the following data structures: * Sorted sets to create secondary indexes by ID or other numerical fields. -* Sorted sets with lexicographical ranges for creating more advanced secondary indexes and composite indexes. +* Sorted sets with lexicographical ranges for creating more advanced secondary indexes, composite indexes and graph traversal indexes. * Sets for creating random indexes. -* Lists for creating simple iterable indexes. +* Lists for creating simple iterable indexes and last N items indexes. Implementing and maintaining indexes with Redis is an advanced topic, so most users that need to perform complex queries on data should understand if they are better served by a relational store. However often, especially in caching -scenarios, there is the explicit need to store indexed data into Redis in order -to speedup common queries which require indexes. +scenarios, there is the explicit need to store indexed data into Redis in order to speedup common queries which require some form of indexing in order to be executed. Simple numerical indexes with sorted sets === -The simplest secondary index you can create with Redis is by using a +The simplest secondary index you can create with Redis is by using the sorted set data type, which is a data structure representing a set of elements ordered by a floating point number which is the *score* of each element. Elements are ordered from the smallest to the highest score. 
Since the score is a double precision float, indexes you can build with vanilla sorted sets are limited to things were the indexing field is a number -within a given specific range. +within a given range. -The two commands to build those kinda of indexes are `ZADD` and +The two commands to build these kind of indexes are `ZADD` and `ZRANGEBYSCORE` to respectively add items and retrieve items within a specified range. -For instance, it is possible to index a set of names by their +For instance, it is possible to index a set of person names by their age by adding element to a sorted set. The element will be the name of the person and the score will be the age. @@ -41,7 +40,8 @@ person and the score will be the age. ZADD myindex 35 Jon ZADD myindex 67 Helen -In order to retrieve all the persons with an age between 20 and 40: +In order to retrieve all persons with an age between 20 and 40, the following +command can be used: ZRANGEBYSCORE myindex 20 40 1) "Manuel" @@ -51,9 +51,9 @@ By using the **WITHSCORES** option of `ZRANGEBYSCORE` it is also possible to obtain the scores associated with the returned elements. The `ZCOUNT` command can be used in order to retrieve the number of elements -between a given range without actually fetching the elements which is also -useful, especially given the fact the operation has logarithmic time -complexity regardless of the size of the range. +within a given range, without actually fetching the elements, which is also +useful, especially given the fact the operation is executed in logarithmic +time regardless of the size of the range. Ranges can be inclusive or exclusive, please refer to the `ZRANGEBYSCORE` command documentation for more information. @@ -61,19 +61,18 @@ command documentation for more information. 
**Note**: Using the `ZREVRANGEBYSCORE` it is possible to query range in reversed order, which is often useful when data is indexed in a given direction (ascending or descending) but we want to retrieve information -in the other way. +the other way around. Using objects IDs as associated values --- In the above example we associated names to ages. However in general we -may want to index some field of an object to some object. Instead of -using as the sorted set value directly the data associated with the -indexed field, it is possible to use an ID which refers to some object -stored at a different key. +may want to index some field of an object which is stored elsewhere. +Instead of using the sorted set value directly to store the data associated +with the indexed field, it is possible to store just the ID of the object. -For example I may have Redis hashes, one per key, referring to hashes -representing users: +For example I may have Redis hashes representing users. Each user is +represented by a single key, directly accessible by ID: HMSET user:1 username id 1 antirez ctime 1444809424 age 38 HMSET user:2 username id 2 maria ctime 1444808132 age 42 @@ -88,15 +87,18 @@ could do: This time the value associated with the score in the sorted set is the ID of the object. So once I query the index with `ZRANGEBYSCORE` I'll -also retrieve the informations I need with `HGETALL` or similar commands. +also have to retrieve the informations I need with `HGETALL` or similar +commands. The obvious advantage is that objects can change without touching +the index, as long as we don't change the indexed field. -In the next examples we'll always use IDs as values associated with the -index, since this is usually the more sounding design. +In the next examples we'll almost always use IDs as values associated with +the index, since this is usually the more sounding design, with a few +exceptions. Updating simple sorted set indexes --- -Often we index things which change during time. 
For example in the above +Often we index things which change over time. For example in the above example, the age of the user changes every year. In such a case it would make sense to use the birth date as index instead of the age itself, but there are other cases where we simple want some field to change from @@ -107,7 +109,7 @@ since re-adding back an element with a different score and the same value will simply update the score and move the element at the right position, so if the user *antirez* turned 39 years old, in order to update the data in the hash representing the user, and in the index as well, we need -the following two commands: +to execute the following two commands: HSET user:1 age 39 ZADD user.age.index 39 1 @@ -125,20 +127,21 @@ is not always true. If you can efficiently represent something multi-dimensional in a linear way, they it is often possible to use a simple sorted set for indexing. -For example the [Redis geo indexing API](/commands/geoadd) users a sorted +For example the [Redis geo indexing API](/commands/geoadd) uses a sorted set to index places by latitude and longitude using a technique called [Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score represents alternating bits of longitude and latitude, so that we map the linear score of a sorted set to many small *squares* in the earth surface. -By doing an 8+1 style center and neighborhood search it is possible to +By doing an 8+1 style center plus neighborhood search it is possible to retrieve elements by radius. Limits of the score --- Sorted set elements scores are double precision integers. It means that -they can represent different decimal or integer values with a different -errors. However what is interesting for indexing is that the score is +they can represent different decimal or integer values with different +errors, because they use an exponential representation internally. 
+However what is interesting for indexing purposes is that the score is always able to represent without any error numbers between -9007199254740992 and 9007199254740992, which is `-/+ 2^53`. @@ -153,16 +156,24 @@ Redis sorted sets have an interesting property. When elements are added with the same score, they are sorted lexicographically, comparing the strings as binary data with the `memcmp()` function. -Moreover, there are commands such as `ZRANGEBYLEX` and `ZLEXCOUNT` that -are able to query and count ranges in a lexicographically fashion. +For people that don't know the C language nor the `memcmp` function, what +it means is that elements with the same score are sorted comparing the +raw values of their bytes, byte after byte. If the first byte is the same, +the second is checked and so forth. If the common prefix of two strings is +the same then the longer string is considered the greater of the two, +so "foobar" is greater than "foo". -This feature is basically equivalent to a `b-tree` data structure which +There are commands such as `ZRANGEBYLEX` and `ZLEXCOUNT` that +are able to query and count ranges in a lexicographically fashion, assuming +they are used with sorted sets where all the elements have the same score. + +This Redis feature is basically equivalent to a `b-tree` data structure which is often used in order to implement indexes with traditional databases. As you can guess, because of this, it is possible to use this Redis data structure in order to implement pretty fancy indexes. Before to dive into using lexicographical indexes, let's check how -sorted sets behave in this special mode of operations. Since we need to +sorted sets behave in this special mode of operation. Since we need to add elements with the same score, we'll always use the special score of zero. @@ -186,9 +197,12 @@ Now we can use `ZRANGEBYLEX` in order to perform range queries. 
1) "aaaa" 2) "abbb" -Note that in the range queries I prefixed my min and max element with -`[` and `(`. This prefixes are mandatory, and they specify if the element -of the range is inclusive or exclusive. So the range `[a (b` means give me all the elements lexicographically between `a` inclusive and `b` exclusive, which are all the elements starting with `a`. +Note that in the range queries we prefixed the `min` and `max` elements +identifying the range with the special characters `[` and `(`. +This prefixes are mandatory, and they specify if the elements +of the range are inclusive or exclusive. So the range `[a (b` means give me +all the elements lexicographically between `a` inclusive and `b` exclusive, +which are all the elements starting with `a`. There are also two more special characters indicating the infinitely negative string and the infinitely positive string, which are `-` and `+`. @@ -202,42 +216,43 @@ That's it basically. Let's see how to use these features to build indexes. A first example: completion --- -An interesting application of indexing is completion, similar to what happens -in a search engine when you start to type your search query: it will -anticipate what you are likely typing, providing common queries that -start with the same characters. +An interesting application of indexing is completion. Completion is what +happens when you start typing your query into a search engine: the user +interface will anticipate what you are likely typing, providing common +queries that start with the same characters. A naive approach to completion is to just add every single query we -get from the user into the index. For example if the user search `banana` +get from the user into the index. For example if the user searches `banana` we'll just do: ZADD myindex 0 banana And so forth for each search query ever encountered. Then when we want to -complete the user query, we do a very simple query using `ZRANGEBYLEX`, like -the following. 
Imagine the user is typing "bit", and we want to complete the -query. We send a command like that: +complete the user input, we execute a range query using `ZRANGEBYLEX`. +Imagine the user is typing "bit" inside the search form, and we want to +offer possible search keywords starting for "bit". We send Redis a command +like that: ZRANGEBYLEX myindex "[bit" "[bit\xff" Basically we create a range using the string the user is typing right now -as start, and the same sting plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. In this way we get all the strings that start for the string the user is typing. +as start, and the same sting plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. This way we get all the strings that start for the string the user is typing. Note that we don't want too many items returned, so we may use the **LIMIT** option in order to reduce the number of results. Adding frequency into the mix --- -The above approach is a bit naive, because all the user queries are the same -in this way. In a real system we want to complete strings accordingly to their -frequency: very popular queries will be proposed with an higher probability -compared to query strings searched very rarely. +The above approach is a bit naive, because all the user searches are the same +in this way. In a real system we want to complete strings according to their +frequency: very popular searches will be proposed with an higher probability +compared to search strings typed very rarely. In order to implement something which depends on the frequency, and at the -same time automatically adapts to future inputs and purges query strings that +same time automatically adapts to future inputs, by purging searches that are no longer popular, we can use a very simple *streaming algorithm*. 
-To start, we modify our index in order to don't have just the search term, +To start, we modify our index in order to store not just the search term, but also the frequency the term is associated with. So instead of just adding `banana` we add `banana:1`, where 1 is the frequency. @@ -267,7 +282,7 @@ get our entry updated. There is more: our goal is to just have items searched very frequently. So we need some form of purging. When we actually query the index -in order to complete the user request, we may see something like that: +in order to complete the user input, we may see something like that: ZRANGEBYLEX myindex "[banana:" + LIMIT 1 10 1) "banana:123" @@ -278,12 +293,12 @@ in order to complete the user request, we may see something like that: Apparently nobody searches for "banahhh", for example, but the query was performed a single time, so we end presenting it to the user. -What we could do is, out of the returned items, we pick a random one, +This is what we can do. Out of the returned items, we pick a random one, decrement its score by one, and re-add it with the new score. However if the score reaches 0, we simply remove the item from the list. You can use much more advanced systems, but the idea is that the index in -the long run will contain top queries, and if top queries will change over -the time it will adapt itself. +the long run will contain top searches, and if top searches will change over +the time it will adapt automatically. A refinement to this algorithm is to pick entries in the list according to their weight: the higher the score, the less likely entries are picked @@ -298,7 +313,7 @@ accents, and so forth. One simple way do deal with this issues is to actually normalize the string the user searches. Whatever the user searches for "Banana", -"BANANA" or Ba'nana" we may always turn it into "banana". +"BANANA" or "Ba'nana" we may always turn it into "banana". 
However sometimes we could like to present the user with the original item typed, even if we normalize the string for indexing. In order to @@ -315,7 +330,7 @@ instead. This is a common trick which has multiple applications. Adding auxiliary informations in the index --- -When using sorted set in a direct way, we have two different attributes +When using a sorted set in a direct way, we have two different attributes for each object: the score, which we use as an index, and an associated value. When using lexicographical indexes instead, the score is always set to 0 and basically not used at all. We are left with a single string, @@ -325,8 +340,8 @@ Like we did in the previous completion examples, we are still able to store associated data using separators. For example we used the colon in order to add the frequency and the original word for completion. -In general we can add any kind of associated value to our primary key. -In order to use a lexicographic index to implement a simple key-value store +In general we can add any kind of associated value to our indexing key. +In order to use a lexicographical index to implement a simple key-value store we just store the entry as `key:value`: ZADD myindex 0 mykey:myvalue @@ -336,7 +351,7 @@ And search for the key with: ZRANGEBYLEX myindex mykey: + LIMIT 1 1 1) "mykey:myvalue" -Then we just get the part after the colon to retrieve the value. +Then we extract the part after the colon to retrieve the value. However a problem to solve in this case is collisions. The colon character may be part of the key itself, so it must be chosen in order to never collide with the key we add. @@ -354,7 +369,7 @@ Numerical padding Lexicographical indexes may look like good only when the problem at hand is to index strings. Actually it is very simple to use this kind of index -in order to index arbitrary precision numbers. +in order to perform indexing of arbitrary precision numbers. 
In the ASCII character set, digits appear in the order from 0 to 9, so if we left-pad numbers with leading zeroes, the result is that comparing @@ -389,8 +404,8 @@ binary form. However for this to work, you need to store the numbers in the least significant bytes. This way when Redis compares the strings with `memcmp()`, it will effectively sort the numbers by their value. -However data stored in binary format is less observable for debugging, harder -to parse and export. So it is definitely a trade off. +Keep in mind that data stored in binary format is less observable for +debugging, harder to parse and export. So it is definitely a trade off. Composite indexes === @@ -444,10 +459,10 @@ So for example, when we index we also add to an hash: HSET index.content 90 0056:0028.44:90 EXEC -This many not be always needed, but simplifies the operations of updating +This is not always needed, but simplifies the operations of updating the index. In order to remove the old information we indexed for the object ID 90, regardless of the *current* fields values of the object, we just -have to retrieve the hash value by object id and `ZREM` it in the sorted +have to retrieve the hash value by object ID and `ZREM` it in the sorted set view. Representing and querying graphs using an hexastore @@ -457,7 +472,7 @@ One cool thing about composite indexes is that they are handy in order to represent graphs, using a data structure which is called [Hexastore](http://www.vldb.org/pvldb/1/1453965.pdf). -The hexastore provides a representation for the relations between objects, +The hexastore provides a representation for relations between objects, formed by a *subject*, a *predicate* and an *object*. A simple relation between objects could be: @@ -471,7 +486,7 @@ in my lexicographical index: Note that I prefixed my item with the string **spo**. It means that the item represents a subject,predicate,object relation. 
-In can add more 5 items for the same relation, but in a different order: +In can add 5 more entries for the same relation, but in a different order: ZADD myindex 0 sop:antirez:matteocollina:is-friend-of ZADD myindex 0 ops:matteocollina:is-friend-of:antirez @@ -479,16 +494,22 @@ In can add more 5 items for the same relation, but in a different order: ZADD myindex 0 pso:is-friend-of:antirez:matteocollina ZADD myindex 0 pos:is-friend-of:matteocollina:antirez -Now things start to be interesting, and I can query the graph for many -interesting things. For example, what are all the people `antirez` +Now things start to be interesting, and I can query the graph in many +different ways. For example, what are all the people `antirez` *is friend to*? - ZRANGEBYLEX myindex "[sop:antirez:" "[sop:antirez:\xff" + ZRANGEBYLEX myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" + 1) "spo:antirez:is-friend-of:matteocollina" + 2) "spo:antirez:is-friend-of:wonderwoman" + 3) "spo:antirez:is-friend-of:spiderman" Or, what are all the relationships `antirez` and `matteocollina` have where the first is the subject and the second is the object? ZRANGEBYLEX myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" + 1) "sop:antirez:matteocollina:is-friend-of" + 2) "sop:antirez:matteocollina:was-at-conference-with" + 2) "sop:antirez:matteocollina:talked-with" By combining different queries, I can ask fancy questions. For example: *What are all my friends that, like beer, live in Barcellona, and matteocollina consider friends as well?* @@ -506,7 +527,8 @@ Non range indexes So far we checked indexes which are useful to query by range or by single item. However other Redis data structures such as Sets or Lists can be used -in order to build indexes working in different ways. +in order to build other kind of indexes. They are very commonly used but +maybe we don't always realize they are actually a form of indexing. 
For instance I can index object IDs into a Set data type in order to use the *get random elements* operation via `SRANDMEMBER` in order to retrieve @@ -514,25 +536,27 @@ a set of random objects. Sets can also be used to check for existence when all I need is to test if a given item exists or not or has a single boolean property or not. -Similarly lists can be used in order to index items into a fixed order, -so I can add all my items into a bit list and rotate the list with -RPOPLPUSH using the same list as source and destination. This is useful -when I want to process a given set of items again and again forever. Think -at an RSS feed system that need to refresh the local copy. +Similarly lists can be used in order to index items into a fixed order. +I can add all my items into a Redis list and rotate the list with +RPOPLPUSH using the same key name as source and destination. This is useful +when I want to process a given set of items again and again forever in the +same order. Think at an RSS feed system that needs to refresh the local copy +periodically. -Another popular index often used for Redis is a **capped list**, where items +Another popular index often used with Redis is a **capped list**, where items are added with `LPUSH` and trimmed `LTRIM`, in order to create a view -with just the latest N items encountered. +with just the latest N items encountered, in the same order they were +seen. Index inconsistency === Keeping the index updated may be challenging, in the course of months -or years it is possible that inconsistency are added because of software +or years it is possible that inconsistencies are added because of software bugs, network partitions or other events. Different strategies could be used. If the index data is outside Redis -*read reapir* can be a solution, where data is fixed in a lazy way when +*read repair* can be a solution, where data is fixed in a lazy way when it is requested. 
When we index data which is stored in Redis itself the `SCAN` family of commands can be used in order to very, update or -rebuild the index from scratch. +rebuild the index from scratch, incrementally. From 9e34c8a1b6943ba0887d79112e587fb1349e682c Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 18:19:02 +0200 Subject: [PATCH 0485/2314] More fixes to indexes page. --- topics/indexes.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index ab744dcc95..0b7ad109c0 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -1,7 +1,7 @@ Secondary indexing with Redis === -Redis is not exactly a key-value store, since values can be complex data structures. However it has an extrenal key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. +Redis is not exactly a key-value store, since values can be complex data structures. However it has an external key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. This document explains how it is possible to create indexes in Redis using the following data structures: @@ -58,7 +58,7 @@ time regardless of the size of the range. Ranges can be inclusive or exclusive, please refer to the `ZRANGEBYSCORE` command documentation for more information. 
-**Note**: Using the `ZREVRANGEBYSCORE` it is possible to query range in +**Note**: Using the `ZREVRANGEBYSCORE` it is possible to query a range in reversed order, which is often useful when data is indexed in a given direction (ascending or descending) but we want to retrieve information the other way around. @@ -98,7 +98,7 @@ exceptions. Updating simple sorted set indexes --- -Often we index things which change over time. For example in the above +Often we index things which change over time. In the above example, the age of the user changes every year. In such a case it would make sense to use the birth date as index instead of the age itself, but there are other cases where we simple want some field to change from @@ -107,7 +107,7 @@ time to time, and the index to reflect this change. The `ZADD` command makes updating simple indexes a very trivial operation since re-adding back an element with a different score and the same value will simply update the score and move the element at the right position, -so if the user *antirez* turned 39 years old, in order to update the +so if the user `antirez` turned 39 years old, in order to update the data in the hash representing the user, and in the index as well, we need to execute the following two commands: @@ -132,7 +132,7 @@ set to index places by latitude and longitude using a technique called [Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score represents alternating bits of longitude and latitude, so that we map the linear score of a sorted set to many small *squares* in the earth surface. -By doing an 8+1 style center plus neighborhood search it is possible to +By doing an 8+1 style center plus neighborhoods search it is possible to retrieve elements by radius. Limits of the score @@ -145,7 +145,7 @@ However what is interesting for indexing purposes is that the score is always able to represent without any error numbers between -9007199254740992 and 9007199254740992, which is `-/+ 2^53`. 
-When representing much larger numbers, you need a different form if indexing +When representing much larger numbers, you need a different form of indexing that is able to index numbers at any precision, called a lexicographical index. @@ -434,13 +434,13 @@ command: The above is called a composed index. Its effectiveness depends on the order of the fields and the queries I want to run. For example the above index cannot be used efficiently in order to get all the products having -a specific prince range regardless of the room number. However I can use +a specific price range regardless of the room number. However I can use the primary key in order to run queries regardless of the prince, like *give me all the products in room 44*. Composite indexes are very powerful, and are used in traditional stores in order to optimize complex queries. In Redis they could be useful both -to perform a very fast in-memory Redis index of something stored into +to implement a very fast in-memory Redis index of something stored into a traditional data store, or in order to directly index Redis data. Updating lexicographical indexes @@ -509,7 +509,7 @@ the first is the subject and the second is the object? ZRANGEBYLEX myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" 1) "sop:antirez:matteocollina:is-friend-of" 2) "sop:antirez:matteocollina:was-at-conference-with" - 2) "sop:antirez:matteocollina:talked-with" + 3) "sop:antirez:matteocollina:talked-with" By combining different queries, I can ask fancy questions. For example: *What are all my friends that, like beer, live in Barcellona, and matteocollina consider friends as well?* From e7a7683e5e510db68af6be86fd728a2e4753b068 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 18:20:10 +0200 Subject: [PATCH 0486/2314] Barcellona -> Barcelona. 
--- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index 0b7ad109c0..fad34bb6e0 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -512,7 +512,7 @@ the first is the subject and the second is the object? 3) "sop:antirez:matteocollina:talked-with" By combining different queries, I can ask fancy questions. For example: -*What are all my friends that, like beer, live in Barcellona, and matteocollina consider friends as well?* +*What are all my friends that, like beer, live in Barcelona, and matteocollina consider friends as well?* To get this information I start with an `spo` query to find all the people I'm friend with. Than for each result I get I perform an `spo` query to check if they like beer, removing the ones for which I can't find From 2a6a1003c29fa116cda748da4244e2bb536cece2 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 18:23:13 +0200 Subject: [PATCH 0487/2314] More types in indexes.md. --- topics/indexes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index fad34bb6e0..2b17d1b58c 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -544,7 +544,7 @@ same order. Think at an RSS feed system that needs to refresh the local copy periodically. Another popular index often used with Redis is a **capped list**, where items -are added with `LPUSH` and trimmed `LTRIM`, in order to create a view +are added with `LPUSH` and trimmed with `LTRIM`, in order to create a view with just the latest N items encountered, in the same order they were seen. @@ -558,5 +558,5 @@ bugs, network partitions or other events. Different strategies could be used. If the index data is outside Redis *read repair* can be a solution, where data is fixed in a lazy way when it is requested. 
When we index data which is stored in Redis itself -the `SCAN` family of commands can be used in order to very, update or +the `SCAN` family of commands can be used in order to verify, update or rebuild the index from scratch, incrementally. From 192fe6e8c31997bf2eb71f1491613868de495c51 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 14 Oct 2015 19:23:12 +0200 Subject: [PATCH 0488/2314] Fix HMSET args. --- topics/indexes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 2b17d1b58c..0a8aa47974 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -74,9 +74,9 @@ with the indexed field, it is possible to store just the ID of the object. For example I may have Redis hashes representing users. Each user is represented by a single key, directly accessible by ID: - HMSET user:1 username id 1 antirez ctime 1444809424 age 38 - HMSET user:2 username id 2 maria ctime 1444808132 age 42 - HMSET user:3 username id 3 jballard ctime 1443246218 age 33 + HMSET user:1 id 1 username antirez ctime 1444809424 age 38 + HMSET user:2 id 2 username maria ctime 1444808132 age 42 + HMSET user:3 id 3 username jballard ctime 1443246218 age 33 If I want to create an index in order to query users by their age, I could do: From 8e215c6750f7cdfb26a455a84b75c090ecf776d9 Mon Sep 17 00:00:00 2001 From: matteobaglini Date: Thu, 15 Oct 2015 15:47:44 +0200 Subject: [PATCH 0489/2314] Typos --- topics/indexes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 0a8aa47974..30893b5adc 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -236,7 +236,7 @@ like that: ZRANGEBYLEX myindex "[bit" "[bit\xff" Basically we create a range using the string the user is typing right now -as start, and the same sting plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. 
This way we get all the strings that start for the string the user is typing. +as start, and the same string plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. This way we get all the strings that start for the string the user is typing. Note that we don't want too many items returned, so we may use the **LIMIT** option in order to reduce the number of results. @@ -435,7 +435,7 @@ The above is called a composed index. Its effectiveness depends on the order of the fields and the queries I want to run. For example the above index cannot be used efficiently in order to get all the products having a specific price range regardless of the room number. However I can use -the primary key in order to run queries regardless of the prince, like +the primary key in order to run queries regardless of the price, like *give me all the products in room 44*. Composite indexes are very powerful, and are used in traditional stores From ca7d37c857e294d38c576266e5dadad3054a9311 Mon Sep 17 00:00:00 2001 From: YawarRaza7349 Date: Thu, 15 Oct 2015 18:15:30 -0400 Subject: [PATCH 0490/2314] Typo in indexes.md now -> know --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index 30893b5adc..5d498fcb70 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -410,7 +410,7 @@ debugging, harder to parse and export. So it is definitely a trade off. Composite indexes === -So far we explored ways to index single fields. However we all now that +So far we explored ways to index single fields. However we all know that SQL stores are able to create indexes using multiple fields. For example I may index products in a very large store by room number and price. 
From 844af6a2ac9f76c46e40adcd7e30835c196a6e2b Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Thu, 27 Aug 2015 20:20:19 +0200 Subject: [PATCH 0491/2314] Update lettuce client --- clients.json | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index f3f9dd4ef6..c3b82f53bb 100644 --- a/clients.json +++ b/clients.json @@ -872,10 +872,12 @@ { "name": "lettuce", "language": "Java", + "url": "http://redis.paluch.biz", "repository": "https://github.com/mp911de/lettuce", - "description": "Thread-safe client supporting async usage and key/value codecs", + "description": "Advanced Redis client for thread-safe sync, async, and reactive usage. Supports Cluster, Sentinel, Pipelining, and codecs.", "authors": ["ar3te", "mp911de"], - "active": true + "active": true, + "recommended": true }, { From bbf04e257d91e630cc5dcc1b3fc623510b2ac0c5 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sun, 18 Oct 2015 22:34:08 +0200 Subject: [PATCH 0492/2314] Add arguments to slots parameter --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index cdcbffc312..3f60c25eec 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -574,7 +574,7 @@ Reshardings can be performed automatically without the need to manually enter the parameters in an interactive way. 
This is possible using a command line like the following: - ./redis-trib.rb reshard --from --to --slots --yes : + ./redis-trib.rb reshard --from --to --slots --yes : This allows to build some automatism if you are likely to reshard often, however currently there is no way for `redis-trib` to automatically From e20bb1e6afec573904db22d967ab1d7f66499645 Mon Sep 17 00:00:00 2001 From: Viranch Mehta Date: Mon, 19 Oct 2015 18:16:07 +0530 Subject: [PATCH 0493/2314] Cluster tutorial- fix 'did not received's to 'did not receive' --- topics/cluster-tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 3f60c25eec..5adfe6959e 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -144,7 +144,7 @@ As you can see B does not wait for an acknowledge from B1, B2, B3 before replying to the client, since this would be a prohibitive latency penalty for Redis, so if your client writes something, B acknowledges the write, but crashes before being able to send the write to its slaves, one of the -slaves (that did not received the write) can be promoted to master, losing +slaves (that did not receive the write) can be promoted to master, losing the write forever. This is **very similar to what happens** with most databases that are @@ -603,7 +603,7 @@ However instead of just writing, the application does two additional things: What this means is that this application is a simple **consistency checker**, and is able to tell you if the cluster lost some write, or if it accepted -a write that we did not received acknowledgment for. In the first case we'll +a write that we did not receive acknowledgment for. In the first case we'll see a counter having a value that is smaller than the one we remember, while in the second case the value will be greater. 
From 1113e948d4d297257388eee455d9ee8ab8761ed7 Mon Sep 17 00:00:00 2001 From: Viranch Mehta Date: Mon, 19 Oct 2015 18:16:43 +0530 Subject: [PATCH 0494/2314] Cluster tutorial- fix 'to be not available' to 'to be unavailable' --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 5adfe6959e..654ca393c4 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -206,7 +206,7 @@ as you continue reading. * **cluster-enabled ``**: If yes enables Redis Cluster support in a specific Redis instance. Otherwise the instance starts as a stand alone instance as usually. * **cluster-config-file ``**: Note that despite the name of this option, this is not an user editable configuration file, but the file where a Redis Cluster node automatically persists the cluster configuration (the state, basically) every time there is a change, in order to be able to re-read it at startup. The file lists things like the other nodes in the cluster, their state, persistent variables, and so forth. Often this file is rewritten and flushed on disk as a result of some message reception. * **cluster-node-timeout ``**: The maximum amount of time a Redis Cluster node can be unavailable, without it being considered as failing. If a master node is not reachable for more than the specified amount of time, it will be failed over by its slaves. This parameter controls other important things in Redis Cluster. Notably, every node that can't reach the majority of master nodes for the specified amount of time, will stop accepting queries. -* **cluster-slave-validity-factor ``**: If set to zero, a slave will always try to failover a master, regardless of the amount of time the link between the master and the slave remained disconnected. 
If the value is positive, a maximum disconnection time is calculated as the *node timeout* value multiplied by the factor provided with this option, and if the node is a slave, it will not try to start a failover if the master link was disconnected for more than the specified amount of time. For example if the node timeout is set to 5 seconds, and the validity factor is set to 10, a slave disconnected from the master for more than 50 seconds will not try to failover its master. Note that any value different than zero may result in Redis Cluster to be not available after a master failure if there is no slave able to failover it. In that case the cluster will return back available only when the original master rejoins the cluster. +* **cluster-slave-validity-factor ``**: If set to zero, a slave will always try to failover a master, regardless of the amount of time the link between the master and the slave remained disconnected. If the value is positive, a maximum disconnection time is calculated as the *node timeout* value multiplied by the factor provided with this option, and if the node is a slave, it will not try to start a failover if the master link was disconnected for more than the specified amount of time. For example if the node timeout is set to 5 seconds, and the validity factor is set to 10, a slave disconnected from the master for more than 50 seconds will not try to failover its master. Note that any value different than zero may result in Redis Cluster to be unavailable after a master failure if there is no slave able to failover it. In that case the cluster will return back available only when the original master rejoins the cluster. * **cluster-migration-barrier ``**: Minimum number of slaves a master will remain connected with, for another slave to migrate to a master which is no longer covered by any slave. See the appropriate section about replica migration in this tutorial for more information. 
* **cluster-require-full-coverage ``**: If this is set to yes, as it is by default, the cluster stops accepting writes if some percentage of the key space is not covered by any node. If the option is set to no, the cluster will still serve queries even if only requests about a subset of keys can be processed. From 23b5acabf9ace3f0e847153c978dfcbeadcc18aa Mon Sep 17 00:00:00 2001 From: kewang Date: Fri, 23 Oct 2015 08:50:19 +0800 Subject: [PATCH 0495/2314] Add Hedis support --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index f29d392e37..78c0510e9a 100644 --- a/tools.json +++ b/tools.json @@ -532,5 +532,13 @@ "repository": "https://github.com/maxbrieiev/promise-redis", "description": "Use any promise library with node_redis.", "authors": [] + }, + { + "name": "Hedis", + "language": "C", + "url": "http://hedis.io/", + "repository": "https://github.com/hedisdb/hedis", + "description": "Hedis can retrieve data from **ANY** database directly via Redis", + "authors": ["kewang"] } ] From 95e3f7bb96740e3fd478a1167f90d1110d8bde68 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Oct 2015 12:26:57 +0200 Subject: [PATCH 0496/2314] Added spatial queries to indexes doc. --- topics/indexes.md | 177 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) diff --git a/topics/indexes.md b/topics/indexes.md index 0a8aa47974..ec1bedd5b6 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -522,6 +522,183 @@ matteocollina. Make sure to check [Matteo Collina's slides about Levelgraph](http://nodejsconfit.levelgraph.io/) in order to better understand these ideas. +Multi dimensional indexes +=== + +A more complex type of index is an index that allows to perform queries +where two or multiple variables are queried at the same time for specific +ranges. 
For example I may have a data set representing persons age and +salary, and I want to retrieve all the people between 50 and 55 years old +having a salaty between 70000 and 85000. + +This query may be performed with a multi column index, but this requires +us to select the first variable and then scan the second, which means we +may do a lot more work than needed. It is possible to perform this kind of +queries involving multiple variables using different data structures. +For example, multi-dimensional trees such as *k-d trees* or *r-trees* are +sometimes used. Here we'll describe a different way to index data into +multiple dimensions, using a representation trick that allows us to perform +the query in a very efficient way using Redis lexicographical ranges. + +Let's start by visualizing the problem. In this picture we have points +in the space, which represent our data samples, where `x` and `y` are +our coordinates. Both variables are is from 0 to 400. + +The blue box in the picture represents our query. We want all the points +where `x` is between 50 and 100, and where `y` is between 100 and 300. + +![Points in the space](http://redis.io/images/redisdoc/2idx_0.png) + +In order to represent data that makes this kind of queries fast to perform, +we start by padding our numbers with 0. So for example imagine we want to +add the point 10,25 (x,y) to our index. Given that the maximum range in the +example is 400 we can just pad to three digits, so we obtain: + + x = 010 + y = 025 + +Now what we do is to interleave the digits, taking the leftmost digit +in x, and the leftmost digit in y, and so forth, in order to create a single +number: + + 001205 + +This is our index, however in order to more easily reconstruct the original +representation, if we want (at the cost of space), we may also add the +original values as additional columns: + + 001205:10:25 + +Now, let's reason about this representation and why it is useful in the +context of range queries. 
For example let's take the center of our blue +fox, which is at `x=75` and `y=200`. We can encode this number as we did +earlier by interleaving the digits, obtaining: + + 027005 + +What happens if we substitute the last two digits respectively with 00 and 99? +We obtain a range which is lexicographically continue: + + 027000 to 027099 + +What this maps to is to a square representing all values where the `x` +variable is between 70 and 79, and the `y` variable is between 200 and 209. +We can write random points in this interval, in order to identify this +specific area: + +![Small area](http://redis.io/images/redisdoc/2idx_1.png) + +So the above lexicographic query allows us to easily query for points in +a specific square in the picture. However the square may be too small for +the box we are searching, so that too many queries are needed. +So we can do the same but instead of replacing the last two digits with 00 +and 99, we can do it for the last four digits, obtaining the following +range: + + 020000 029999 + +This time the range represents all the points where `x` is between 0 and 99 +and `y` is between 200 and 299. Drawing random points in this interval +shows us this larger area: + +![Large area](http://redis.io/images/redisdoc/2idx_2.png) + +Oops now our area is ways too big for our query, and still our search box is +not completely included. We need more granularity, but we can easily obtain +it by representing our numbers in binary form. This time, when we replace +digits instead of getting squares which are ten times bigger, we get squares +which are just two times bigger. + +Our numbers in binary form, assuming we need just 9 bits for each variable +(in order to represent numbers up to 400 in value) would be: + + x = 75 -> 001001011 + y = 200 -> 011001000 + +So by interleaving our representation in the index would be: + + 0001110011001010:75:200 + +Let's see what are our ranges as we substitute the last 2, 4, 6, 8, ... 
+bits with 0s ad 1s in the interleaved representation: + + 2 bits: x between 70 and 75, y between 200 and 201 (range=2) + 4 bits: x between 72 and 75, y between 200 and 203 (range=4) + 6 bits: x between 72 and 79, y between 200 and 207 (range=8) + 8 bits: x between 64 and 79, y between 192 and 207 (range=16) + +And so forth. Now we have definitely better granularity! +As you can see substituting N bits from the index gives us +search boxes of side `2^(N/2)`. + +So what we do is to check the dimension where our search box is smaller, +and check the nearest power of two to this number. Our search box +was 50,100 to 100,300, so it has a width of 50 and an height of 200. +We take the smaller of the two, 50, and check the nearest power of two +which is 64. 64 is 2^6, so we would work with indexes obtained replacing +the latest 12 bits from the interleaved representation. + +However single squares may not cover all our search, so we may need more. +What we do is to start with the left bottom corner of our search box, +which is 50,100, and find the first range by substituting the last 6 bits +in each number with 0. Then we do the same with the right top corner. + +With two trivial nested for loops where we increment only the significative +bits, we can find all the squares between this two. For each square we +convert the two numbers into our interleaved representation, and create +the range using the converted representation as our start, and the same +representation but with the latest 6 bits turned on as end range. + +For each square found we perform our query and get the elements inside, +removing the elements which are outside our search box. + +Turning this into code is simple. 
Here is a Ruby example: + + def spacequery(x0,y0,x1,y1,exp) + bits=exp*2 + x_start = x0/(2**exp) + x_end = x1/(2**exp) + y_start = y0/(2**exp) + y_end = y1/(2**exp) + (x_start..x_end).each{|x| + (y_start..y_end).each{|y| + x_range_start = x*(2**exp) + x_range_end = x_range_start | ((2**exp)-1) + y_range_start = y*(2**exp) + y_range_end = y_range_start | ((2**exp)-1) + puts "#{x},#{y} x from #{x_range_start} to #{x_range_end}, y from #{y_range_start} to #{y_range_end}" + + # Turn it into interleaved form for ZRANGEBYLEX query. + # We assume we need 9 bits for each integer, so the final + # interleaved representation will be 18 bits. + xbin = x_range_start.to_s(2).rjust(9,'0') + ybin = x_range_start.to_s(2).rjust(9,'0') + s = xbin.split("").zip(ybin.split("")).flatten.compact.join("") + # Now that we have the start of the range, calculate the end + # by replacing the specified number of bits from 0 to 1. + e = s[0..-(bits+1)]+("1"*bits) + puts "ZRANGEBYLEX myindex [#{s} [#{e}" + } + } + end + + spacequery(50,100,100,300,6) + +While non immediately trivial this is a very useful indexing strategy that +in the future may be implemented in Redis in a native way. + +Multi dimensional indexes with negative or floating point numbers +=== + +The simplest way to represent negative value is just to work with unsigned +integers and represent them using an offset, so that when you index, before +translating numbers in the indexed representation, you add the absolute value +of your smaller negative integer. + +For floating point numbers, the simplest thing is to convert them to integers +by multiplying the integer for a power of ten proportional to the number of +integers after the dot you want to retain. + Non range indexes === From cbebed6ff914749c5ca904f55ea2b75c39d367e2 Mon Sep 17 00:00:00 2001 From: Eagle Flies Date: Fri, 23 Oct 2015 12:27:29 +0200 Subject: [PATCH 0497/2314] Added note about 'redis' gem. 
--- topics/cluster-tutorial.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 3f60c25eec..1a9ebc146b 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -290,6 +290,10 @@ command line utility called `redis-trib`, that is a Ruby program executing special commands in the instances in order to create new clusters, check or reshard an existing cluster, and so forth. +You need to install 'redis' gem to be able to run 'redis-trib'. + + gem install redis + The `redis-trib` utility is in the `src` directory of the Redis source code distribution. To create your cluster simply type: From 0a7bd8e5b346efa9ee98851217cba47928883e9c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Oct 2015 12:41:17 +0200 Subject: [PATCH 0498/2314] First set of fixes to new indexes doc. --- topics/indexes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 924d889897..e073a37916 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -529,7 +529,7 @@ A more complex type of index is an index that allows to perform queries where two or multiple variables are queried at the same time for specific ranges. For example I may have a data set representing persons age and salary, and I want to retrieve all the people between 50 and 55 years old -having a salaty between 70000 and 85000. +having a salary between 70000 and 85000. This query may be performed with a multi column index, but this requires us to select the first variable and then scan the second, which means we @@ -542,7 +542,7 @@ the query in a very efficient way using Redis lexicographical ranges. Let's start by visualizing the problem. In this picture we have points in the space, which represent our data samples, where `x` and `y` are -our coordinates. Both variables are is from 0 to 400. +our coordinates. Both variables max value is 400. 
The blue box in the picture represents our query. We want all the points where `x` is between 50 and 100, and where `y` is between 100 and 300. @@ -571,7 +571,7 @@ original values as additional columns: Now, let's reason about this representation and why it is useful in the context of range queries. For example let's take the center of our blue -fox, which is at `x=75` and `y=200`. We can encode this number as we did +box, which is at `x=75` and `y=200`. We can encode this number as we did earlier by interleaving the digits, obtaining: 027005 From 5f9c8ab61f5fb6c3beba38f2d8f6868c57121fb3 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Oct 2015 12:48:19 +0200 Subject: [PATCH 0499/2314] More fixes to the indexes doc. --- topics/indexes.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index e073a37916..30341a0157 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -615,7 +615,7 @@ Our numbers in binary form, assuming we need just 9 bits for each variable x = 75 -> 001001011 y = 200 -> 011001000 -So by interleaving our representation in the index would be: +So by interleaving digits, our representation in the index would be: 0001110011001010:75:200 @@ -636,7 +636,8 @@ and check the nearest power of two to this number. Our search box was 50,100 to 100,300, so it has a width of 50 and an height of 200. We take the smaller of the two, 50, and check the nearest power of two which is 64. 64 is 2^6, so we would work with indexes obtained replacing -the latest 12 bits from the interleaved representation. +the latest 12 bits from the interleaved representation (so that we end +replacing just 6 bits of each variable). However single squares may not cover all our search, so we may need more. 
What we do is to start with the left bottom corner of our search box, @@ -647,7 +648,7 @@ With two trivial nested for loops where we increment only the significative bits, we can find all the squares between this two. For each square we convert the two numbers into our interleaved representation, and create the range using the converted representation as our start, and the same -representation but with the latest 6 bits turned on as end range. +representation but with the latest 12 bits turned on as end range. For each square found we perform our query and get the elements inside, removing the elements which are outside our search box. @@ -686,18 +687,20 @@ Turning this into code is simple. Here is a Ruby example: While non immediately trivial this is a very useful indexing strategy that in the future may be implemented in Redis in a native way. +For now, the good thing is that the complexity may be easily incapsualted +inside a library that can be used in order to perform indexing and queries. Multi dimensional indexes with negative or floating point numbers === -The simplest way to represent negative value is just to work with unsigned +The simplest way to represent negative values is just to work with unsigned integers and represent them using an offset, so that when you index, before translating numbers in the indexed representation, you add the absolute value of your smaller negative integer. -For floating point numbers, the simplest thing is to convert them to integers -by multiplying the integer for a power of ten proportional to the number of -integers after the dot you want to retain. +For floating point numbers, the simplest approach is probably to convert them +to integers by multiplying the integer for a power of ten proportional to the +number of digits after the dot you want to retain. 
Non range indexes === From 627f8d700366fa44d6be77a1b0883d9b44d04dd2 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Oct 2015 16:44:22 +0200 Subject: [PATCH 0500/2314] Fixed typo in example code. --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index 30341a0157..f1f206118d 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -673,7 +673,7 @@ Turning this into code is simple. Here is a Ruby example: # We assume we need 9 bits for each integer, so the final # interleaved representation will be 18 bits. xbin = x_range_start.to_s(2).rjust(9,'0') - ybin = x_range_start.to_s(2).rjust(9,'0') + ybin = y_range_start.to_s(2).rjust(9,'0') s = xbin.split("").zip(ybin.split("")).flatten.compact.join("") # Now that we have the start of the range, calculate the end # by replacing the specified number of bits from 0 to 1. From 683e4348a5079eace198c50b816de8d16099c8ab Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sat, 24 Oct 2015 20:41:46 +0200 Subject: [PATCH 0501/2314] Reorder and reformulate redis-trib usage --- topics/cluster-tutorial.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 5fe9dc8a20..cb222863cf 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -286,16 +286,18 @@ Now that we have a number of instances running, we need to create our cluster by writing some meaningful configuration to the nodes. This is very easy to accomplish as we are helped by the Redis Cluster -command line utility called `redis-trib`, that is a Ruby program -executing special commands in the instances in order to create new clusters, +command line utility called `redis-trib`, a Ruby program +executing special commands on instances in order to create new clusters, check or reshard an existing cluster, and so forth. -You need to install 'redis' gem to be able to run 'redis-trib'. 
+ +The `redis-trib` utility is in the `src` directory of the Redis source code +distribution. +You need to install `redis` gem to be able to run `redis-trib`. gem install redis -The `redis-trib` utility is in the `src` directory of the Redis source code -distribution. To create your cluster simply type: + To create your cluster simply type: ./redis-trib.rb create --replicas 1 127.0.0.1:7000 127.0.0.1:7001 \ 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 From 7f196f62d27bbed8fb6589c2ed87805cf7e4db20 Mon Sep 17 00:00:00 2001 From: Reda Bouchaala Date: Tue, 27 Oct 2015 17:25:26 +0100 Subject: [PATCH 0502/2314] Added missing word in topics/sentinel.md --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 044b1fc0d7..ef1d377ecf 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -290,7 +290,7 @@ discarding its data set. This problem can be mitigated using the following Redis replication feature, that allows to stop accepting writes if a master detects that -is no longer to transfer its writes to the specified number of slaves. +is no longer able to transfer its writes to the specified number of slaves. min-slaves-to-write 1 min-slaves-max-lag 10 From d878975b17c4cca2c42f28a7f24f79c09825a00a Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 28 Oct 2015 12:41:28 +0100 Subject: [PATCH 0503/2314] Link at Redimension lib. --- topics/indexes.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/indexes.md b/topics/indexes.md index f1f206118d..ce7a489cc6 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -689,6 +689,7 @@ While non immediately trivial this is a very useful indexing strategy that in the future may be implemented in Redis in a native way. For now, the good thing is that the complexity may be easily incapsualted inside a library that can be used in order to perform indexing and queries. 
+One example of such library is [Redimension](https://github.com/antirez/redimension), a proof of concept Ruby library which indexes N-dimensional data inside Redis using the technique described here. Multi dimensional indexes with negative or floating point numbers === From df7ccd47a61225a3ac284eef327ec3769f29942f Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 2 Nov 2015 11:04:31 +0100 Subject: [PATCH 0504/2314] Document new Lua replication features in 3.2. --- commands/eval.md | 152 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 119 insertions(+), 33 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 1a0f0de80e..097fe3a170 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -326,10 +326,11 @@ SCRIPT currently accepts three different commands: ## Scripts as pure functions A very important part of scripting is writing scripts that are pure functions. -Scripts executed in a Redis instance are replicated on slaves by sending the -script -- not the resulting commands. -The same happens for the Append Only File. -The reason is that sending a script to another Redis instance is much +Scripts executed in a Redis instance are, by default, replicated on slaves +and into the AOF file by sending the script itself -- not the resulting +commands. + +The reason is that sending a script to another Redis instance is often much faster than sending the multiple commands the script generates, so if the client is sending many scripts to the master, converting the scripts into individual commands for the slave / AOF would result in too much bandwidth @@ -337,11 +338,18 @@ for the replication link or the Append Only File (and also too much CPU since dispatching a command received via network is a lot more work for Redis compared to dispatching a command invoked by Lua scripts). 
-The only drawback with this approach is that scripts are required to have the -following property: +Normally replicationg scripts instead of the effects of the scripts makes sense, +however not in all the cases. So starting with Redis 3.2 (currently not stable), +the scripting engine is able to, alternatively, replicate the sequence of write +commands resulting from the script execution, instead of replication the +script itself. See th next section for more information. +In this section we'll assume that scripts are replicated by sending the whole +script. Let's call this replication mode **whole scripts replication**. + +The main drawback with the *whole scripts replication* approach is that scripts are required to have the following property: -* The script always evaluates the same Redis _write_ commands with the same - arguments given the same input data set. +* The script must always evaluates the same Redis _write_ commands with the + same arguments given the same input data set. Operations performed by the script cannot depend on any hidden (non-explicit) information or state that may change as script execution proceeds or between different executions of the script, nor can it depend on any external input @@ -353,31 +361,28 @@ that will not always evaluate in the same way. In order to enforce this behavior in scripts Redis does the following: -* Lua does not export commands to access the system time or other external - state. - -* Redis will block the script with an error if a script calls a Redis - command able to alter the data set **after** a Redis _random_ command like - `RANDOMKEY`, `SRANDMEMBER`, `TIME`. - This means that if a script is read-only and does not modify the data set it - is free to call those commands. - Note that a _random command_ does not necessarily mean a command that uses - random numbers: any non-deterministic command is considered a random command - (the best example in this regard is the `TIME` command). 
- -* Redis commands that may return elements in random order, like `SMEMBERS` - (because Redis Sets are _unordered_) have a different behavior when called - from Lua, and undergo a silent lexicographical sorting filter before - returning data to Lua scripts. - So `redis.call("smembers",KEYS[1])` will always return the Set elements - in the same order, while the same command invoked from normal clients may - return different results even if the key contains exactly the same elements. - -* Lua pseudo random number generation functions `math.random` and - `math.randomseed` are modified in order to always have the same seed every - time a new script is executed. - This means that calling `math.random` will always generate the same sequence - of numbers every time a script is executed if `math.randomseed` is not used. +* Lua does not export commands to access the system time or other external + state. +* Redis will block the script with an error if a script calls a Redis + command able to alter the data set **after** a Redis _random_ command like + `RANDOMKEY`, `SRANDMEMBER`, `TIME`. + This means that if a script is read-only and does not modify the data set it + is free to call those commands. + Note that a _random command_ does not necessarily mean a command that uses + random numbers: any non-deterministic command is considered a random command + (the best example in this regard is the `TIME` command). +* Redis commands that may return elements in random order, like `SMEMBERS` + (because Redis Sets are _unordered_) have a different behavior when called + from Lua, and undergo a silent lexicographical sorting filter before + returning data to Lua scripts. + So `redis.call("smembers",KEYS[1])` will always return the Set elements + in the same order, while the same command invoked from normal clients may + return different results even if the key contains exactly the same elements. 
+* Lua pseudo random number generation functions `math.random` and + `math.randomseed` are modified in order to always have the same seed every + time a new script is executed. + This means that calling `math.random` will always generate the same sequence + of numbers every time a script is executed if `math.randomseed` is not used. However the user is still able to write commands with random behavior using the following simple trick. @@ -459,6 +464,87 @@ regardless of the architecture of the system running Redis. 32-bit, 64-bit, big-endian and little-endian systems will all produce the same output. +## Replicating commands instead of scripts + +Starting with Redis 3.2 (not yet stable) it is possible to select an +alternative replication method. Instead of replication whole scripts, we +can just replicate single write commands generated by the script. +We call this **script effects replication**. + +In this replication mode, while Lua scripts are executed, Redis collects +all the commands executed by the Lua scripting engine that actually modify +the dataset. When the script execution finishes, the sequence of commands +that the script generated are wrapped into a MULTI / EXEC transaction and +are sent to slaves and AOF. + +This is useful in several ways depending on the use case: + +* When the script is slow to compute, but the effects can be summarized by +a few write commands, it is a shame to re-compute the script on the slaves +or when reloading the AOF. In this case to replicate just the effect of the +script is much better. +* When script effects replication is enabled, the controls about non +deterministic functions are disabled. You can, for example, use the `TIME` +or `SRANDMEMBMER` commands inside your scripts freely at any place. +* The Lua PRNG in this mode is seeded randomly at every call. 
+ +In order to enable script effects replication, you need to issue the +following Lua command before any write operated by the script: + + redis.replicate_commands(); + +The function returns true if the script effects replication was enabled, +otherwise if the function was called after the script already called +some write command, it returns false, and normal whole script replication +is used. + +## Selective replication of commands + +When script effects replication is selected (see the previous section), it +is possible to have more control in the way commands are replicated to slaves +and AOF. This is a very advanced feature since **a misuse can do damage** by +breaking the contract that the master, slaves, and AOF, all must contain the +same logical content. + +However this is a useful feature since, sometimes, we need to execute certain +commands only in the master in order to create, for example, intermediate +values. + +Think at a Lua script where we perform an intersection between two sets. +Pick five random elements, and create a new set with this five random +elements. Finally we delete the temporary key representing the intersection +between the two original sets. What we want to replicate is only the creating +of the new set with the five elements. It's not useful to also replicate the +commands creating the temporary key. + +For this reason, Redis 3.2 introduces a new command that only works when +script effects replication is enabled, and is able to control the scripting +replication engine. The command is called `redis.set_repl()` and fails raising +an error if called when script effects replication is disabled. + +The command can be called with four different arguments: + + redis.set_repl(redis.REPL_ALL); -- Replicte to AOF and slaves. + redis.set_repl(redis.REPL_AOF); -- Replicte only to AOF. + redis.set_repl(redis.REPL_SLAVE); -- Replicte only to slaves. + redis.set_repl(redis.REPL_NONE); -- Don't replicate at all. 
+ +By default the scripting engine is always set to `REPL_ALL`. By calling +this function the user can switch on/off AOF and or slaves replication, and +turn them back later at her/his wish. + +A simple example follows: + + redis.replicate_commands(); -- Enable effects replication. + redis.call('set','A','1'); + redis.set_repl(redis.REPL_NONE); + redis.call('set','B','2'); + redis.set_repl(redis.REPL_ALL); + redis.call('set','C','3'); + +After running the above script, the result is that only keys A and C +will be created on slaves and AOF. + ## Global variables protection Redis scripts are not allowed to create global variables, in order to avoid From 1cc54499bd7be5908fa5d0b99d337f842d1c87c2 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 2 Nov 2015 11:06:34 +0100 Subject: [PATCH 0505/2314] Fixed typo. --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 097fe3a170..531a63c3a2 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -485,7 +485,7 @@ or when reloading the AOF. In this case to replicate just the effect of the script is much better. * When script effects replication is enabled, the controls about non deterministic functions are disabled. You can, for example, use the `TIME` -or `SRANDMEMBMER` commands inside your scripts freely at any place. +or `SRANDMEMBER` commands inside your scripts freely at any place. * The Lua PRNG in this mode is seeded randomly at every call. In order to enable script effects replication, you need to issue the From 4f409eb58865fd5e8ffca0992171009d56a5c612 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 2 Nov 2015 11:35:32 +0100 Subject: [PATCH 0506/2314] EVAL grammar fix. --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 531a63c3a2..79309a7277 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -513,7 +513,7 @@ values. 
Think at a Lua script where we perform an intersection between two sets. Pick five random elements, and create a new set with this five random elements. Finally we delete the temporary key representing the intersection -between the two original sets. What we want to replicate is only the creating +between the two original sets. What we want to replicate is only the creation of the new set with the five elements. It's not useful to also replicate the commands creating the temporary key. From 261ced5b5404e98367509dfbeb0d20c60f4e33ab Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 2 Nov 2015 11:43:28 +0100 Subject: [PATCH 0507/2314] Improve cluster forget doc as per issue #636. --- commands/cluster-forget.md | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/commands/cluster-forget.md b/commands/cluster-forget.md index cd356ee7c9..63b69a0772 100644 --- a/commands/cluster-forget.md +++ b/commands/cluster-forget.md @@ -1,22 +1,32 @@ -The command is used in order to remove the node, specified via its node ID, -from the set of nodes known by the Redis Cluster node receiving the command. +The command is used in order to remove a node, specified via its node ID, +from the set of *known nodes* of the Redis Cluster node receiving the command. In other words the specified node is removed from the *nodes table* of the node receiving the command. -However the command cannot simply drop the node from its internal configuration, -it also implements a ban-list, not allowing the same node to be added again -as a side effect of processing the *gossip section* of the heartbeat packets -received from other nodes. +Because when a given node is part of the cluster, all the other nodes +participating in the cluster knows about it, in order for a node to be +completely removed from a cluster, the `CLUSTER FORGET` command must be +sent to all the remaining nodes, regardless of the fact they are masters +or slaves. 
-## Details on the command behavior +However the command cannot simply drop the node from the internal node +table of the node receiving the command, it also implements a ban-list, not +allowing the same node to be added again as a side effect of processing the +*gossip section* of the heartbeat packets received from other nodes. -For example, let's assume we have four nodes, A, B, C and D. In order to +## Details on why the ban-list is needed + +In the following example we'll show why the command must not just remove +a given node from the nodes table, but also prevent it for being re-inserted +again for some time. + +Let's assume we have four nodes, A, B, C and D. In order to end with just a three nodes cluster A, B, C we may follow these steps: 1. Reshard all the hash slots from D to nodes A, B, C. 2. D is now empty, but still listed in the nodes table of A, B and C. 3. We contact A, and send `CLUSTER FORGET D`. -4. B sends A a heartbeat packet, where node D is listed. +4. B sends node A a heartbeat packet, where node D is listed. 5. A does no longer known node D (see step 3), so it starts an handshake with D. 6. D ends re-added in the nodes table of A. From c9f2d89a9fddd3510b596746c3783cb488580e4c Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 2 Nov 2015 14:58:33 +0100 Subject: [PATCH 0508/2314] Grammar fixes to indexing doc. --- topics/indexes.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index ce7a489cc6..6fc433b3ce 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -172,7 +172,7 @@ is often used in order to implement indexes with traditional databases. As you can guess, because of this, it is possible to use this Redis data structure in order to implement pretty fancy indexes. -Before to dive into using lexicographical indexes, let's check how +Before we dive into using lexicographical indexes, let's check how sorted sets behave in this special mode of operation. 
Since we need to add elements with the same score, we'll always use the special score of zero. @@ -277,7 +277,7 @@ above three commands should be send via a [Lua script](/commands/eval) instead, so that the Lua script will atomically get the old count and re-add the item with incremented score. -So the result will be that, every time an user searches for `banana` we'll +So the result will be that, every time a user searches for `banana` we'll get our entry updated. There is more: our goal is to just have items searched very frequently. @@ -450,9 +450,9 @@ The value of the index in a lexicographical index can get pretty fancy and hard or slow to rebuild from what we store about the object. So one approach to simplify the handling of the index, at the cost of using more memory, is to also take alongside to the sorted set representing the index -an hash mapping the object ID to the current index value. +a hash mapping the object ID to the current index value. -So for example, when we index we also add to an hash: +So for example, when we index we also add to a hash: MULTI ZADD myindex 0 0056:0028.44:90 @@ -721,7 +721,7 @@ Similarly lists can be used in order to index items into a fixed order. I can add all my items into a Redis list and rotate the list with RPOPLPUSH using the same key name as source and destination. This is useful when I want to process a given set of items again and again forever in the -same order. Think at an RSS feed system that needs to refresh the local copy +same order. Think of an RSS feed system that needs to refresh the local copy periodically. Another popular index often used with Redis is a **capped list**, where items From 450143a3fa2d689c7b26161fef5f376024d49ab9 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 3 Nov 2015 11:17:01 +0100 Subject: [PATCH 0509/2314] Appendix about THP removed. 
While THPs really make fork faster, it actually creates a huge delay in all the cases but when there is no load at all, because of a mass copy on write event. So let's drop this conflicting statement. We must say, all compact, with our hand raised, DISABLE THPs! --- topics/latency.md | 71 ----------------------------------------------- 1 file changed, 71 deletions(-) diff --git a/topics/latency.md b/topics/latency.md index c421e848e5..27a3ed3cef 100644 --- a/topics/latency.md +++ b/topics/latency.md @@ -617,74 +617,3 @@ Note: in the example the **DEBUG SLEEP** command was used in order to block the If you happen to collect multiple watchdog stack traces you are encouraged to send everything to the Redis Google Group: the more traces we obtain, the simpler it will be to understand what the problem with your instance is. -APPENDIX A: Experimenting with huge pages ------------------------------------------ - -Latency introduced by fork can be mitigated using huge pages at the cost of a bigger memory usage during persistence. The following appendix describe in details this feature as implemented in the Linux kernel. - -Some CPUs can use different page size though. AMD and Intel CPUs can support -2 MB page size if needed. These pages are nicknamed *huge pages*. Some -operating systems can optimize page size in real time, transparently -aggregating small pages into huge pages on the fly. - -On Linux, explicit huge pages management has been introduced in 2.6.16, and -implicit transparent huge pages are available starting in 2.6.38. If you -run recent Linux distributions (for example RH 6 or derivatives), transparent -huge pages can be activated, and you can use a vanilla Redis version with them. - -This is the preferred way to experiment/use with huge pages on Linux. - -Now, if you run older distributions (RH 5, SLES 10-11, or derivatives), and -not afraid of a few hacks, Redis requires to be patched in order to support -huge pages. 
- -The first step would be to read [Mel Gorman's primer on huge pages](http://lwn.net/Articles/374424/) - -There are currently two ways to patch Redis to support huge pages. - -+ For Redis 2.4, the embedded jemalloc allocator must be patched. -[patch](https://gist.github.com/1171054) by Pieter Noordhuis. -Note this patch relies on the anonymous mmap huge page support, -only available starting 2.6.32, so this method cannot be used for older -distributions (RH 5, SLES 10, and derivatives). - -+ For Redis 2.2, or 2.4 with the libc allocator, Redis makefile -must be altered to link Redis with -[the libhugetlbfs library](http://libhugetlbfs.sourceforge.net/). -It is a straightforward [change](https://gist.github.com/1240452) - -Then, the system must be configured to support huge pages. - -The following command allocates and makes N huge pages available: - - $ sudo sysctl -w vm.nr_hugepages= - -The following command mounts the huge page filesystem: - - $ sudo mount -t hugetlbfs none /mnt/hugetlbfs - -In all cases, once Redis is running with huge pages (transparent or -not), the following benefits are expected: - -+ The latency due to the fork operations is dramatically reduced. - This is mostly useful for very large instances, and especially - on a VM. -+ Redis is faster due to the fact the translation look-aside buffer - (TLB) of the CPU is more efficient to cache page table entries - (i.e. the hit ratio is better). Do not expect miracle, it is only - a few percent gain at most. -+ Redis memory cannot be swapped out anymore, which is interesting - to avoid outstanding latencies due to virtual memory. - -Unfortunately, and on top of the extra operational complexity, -there is also a significant drawback of running Redis with -huge pages. The COW mechanism granularity is the page. With -2 MB pages, the probability a page is modified during a background -save operation is 512 times higher than with 4 kB pages. 
The actual -memory required for a background save therefore increases a lot, -especially if the write traffic is truly random, with poor locality. -With huge pages, using twice the memory while saving is not anymore -a theoretical incident. It really happens. - -The result of a complete benchmark can be found -[here](https://gist.github.com/1272254). From c442622b18b4102e43a6a5171cba08bc966d2e02 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 3 Nov 2015 22:46:00 +0100 Subject: [PATCH 0510/2314] Fix typos --- commands/eval.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 79309a7277..0ef4860dfc 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -338,11 +338,11 @@ for the replication link or the Append Only File (and also too much CPU since dispatching a command received via network is a lot more work for Redis compared to dispatching a command invoked by Lua scripts). -Normally replicationg scripts instead of the effects of the scripts makes sense, +Normally replicating scripts instead of the effects of the scripts makes sense, however not in all the cases. So starting with Redis 3.2 (currently not stable), the scripting engine is able to, alternatively, replicate the sequence of write commands resulting from the script execution, instead of replication the -script itself. See th next section for more information. +script itself. See the next section for more information. In this section we'll assume that scripts are replicated by sending the whole script. Let's call this replication mode **whole scripts replication**. 
From 13db0a5497fa70d08ab666edb44cf061c9b80176 Mon Sep 17 00:00:00 2001 From: Sankalp Khare Date: Wed, 4 Nov 2015 17:27:43 +0530 Subject: [PATCH 0511/2314] Fix path of kernel huge pages config file --- topics/admin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/admin.md b/topics/admin.md index 3ea8fa32d7..d99cfb83f2 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -9,7 +9,7 @@ Redis setup hints + We suggest deploying Redis using the **Linux operating system**. Redis is also tested heavily on OS X, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are working. + Make sure to set the Linux kernel **overcommit memory setting to 1**. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot or run the command `sysctl vm.overcommit_memory=1` for this to take effect immediately. -* Make sure to disable Linux kernel feature *transparent huge pages*, it will affect greatly both memory usage and latency in a negative way. This is accomplished with the following command: `echo never > sys/kernel/mm/transparent_hugepage/enabled`. +* Make sure to disable Linux kernel feature *transparent huge pages*, it will affect greatly both memory usage and latency in a negative way. This is accomplished with the following command: `echo never > /sys/kernel/mm/transparent_hugepage/enabled`. + Make sure to **setup some swap** in your system (we suggest as much as swap as memory). If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash for out of memory or the Linux kernel OOM killer will kill the Redis process. + Set an explicit `maxmemory` option limit in your instance in order to make sure that the instance will report errors instead of failing when the system memory limit is near to be reached. 
+ If you are using Redis in a very write-heavy application, while saving an RDB file on disk or rewriting the AOF log **Redis may use up to 2 times the memory normally used**. The additional memory used is proportional to the number of memory pages modified by writes during the saving process, so it is often proportional to the number of keys (or aggregate types items) touched during this time. Make sure to size your memory accordingly. From 495253c8f9d95b2b7183ceacc399d0656096a61e Mon Sep 17 00:00:00 2001 From: Jip van Reijsen Date: Thu, 5 Nov 2015 16:02:39 +0100 Subject: [PATCH 0512/2314] Add link to self documented redis.conf for Redis 3.0 --- topics/config.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/config.md b/topics/config.md index 4ebcf7b3e5..374b87c02e 100644 --- a/topics/config.md +++ b/topics/config.md @@ -26,6 +26,7 @@ The list of configuration directives, and their meaning and intended usage is available in the self documented example redis.conf shipped into the Redis distribution. +* The self documented [redis.conf for Redis 3.0](https://raw.githubusercontent.com/antirez/redis/3.0/redis.conf) * The self documented [redis.conf for Redis 2.8](https://raw.githubusercontent.com/antirez/redis/2.8/redis.conf) * The self documented [redis.conf for Redis 2.6](https://raw.githubusercontent.com/antirez/redis/2.6/redis.conf). * The self documented [redis.conf for Redis 2.4](https://raw.githubusercontent.com/antirez/redis/2.4/redis.conf). 
From fc24b3d6df2d91dde60f13d24e272f0db66af06b Mon Sep 17 00:00:00 2001 From: ronp001 Date: Mon, 9 Nov 2015 02:37:16 -0500 Subject: [PATCH 0513/2314] Update clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index c3b82f53bb..e4a59b7658 100644 --- a/clients.json +++ b/clients.json @@ -1245,6 +1245,15 @@ "active": true }, + { + "name": "SwiftRedis", + "language": "Swift", + "repository": "https://github.com/ronp001/SwiftRedis", + "description": "Basic async client for Redis in Swift (iOS)", + "authors": ["ronp001"], + "active": true + }, + { "name": "Rackdis", "language": "Racket", From 1837ff7c2107d09beb6f268340eb2efc1a9cb3d4 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Nov 2015 17:48:49 +0100 Subject: [PATCH 0514/2314] Securing Redis seciton added. --- topics/quickstart.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/topics/quickstart.md b/topics/quickstart.md index f52eefecc1..e15b033267 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -85,6 +85,24 @@ Another interesting way to run redis-cli is without arguments: the program will At this point you are able to talk with Redis. It is the right time to pause a bit with this tutorial and start the [fifteen minutes introduction to Redis data types](http://redis.io/topics/data-types-intro) in order to learn a few Redis commands. Otherwise if you already know a few basic Redis commands you can keep reading. +Securing Redis +=== + +By default Redis binds to **all the interfaces** and has no authentication at +all. If you use Redis into a very controlled environment, separated from the +external internet and in general from attackers, that's fine. However if Redis +without any hardening is exposed to the internet, it is a big security +concern. 
If you are not 100% sure your environment is secured properly, please +check the following steps in order to make Redis more secure, which are +enlisted in order of increased security. + +1. Make sure the port Redis uses to listen for connections (by default 6379 and additionally 16379 if you run Redis in cluster mode, plus 26379 for Sentinel) is firewalled, so that it is not possible to contact Redis from the outside world. +2. Use a configuration file where the `bind` directive is set in order to guarantee that Redis listens just in as little network interfaces you are using. For example only the loopback interface (127.0.0.1) if you are accessing Redis just locally from the same computer, and so forth. +3. Use the `requirepass` option in order to add an additional layer of security so that clients will require to authenticate using the `AUTH` command. +4. Use [spiped](http://www.tarsnap.com/spiped.html) or another SSL tunnelling software in order to encrypt traffic between Redis servers and Redis clients if your environment requires encryption. + +Note that a Redis exposed to the internet without any security [is very simple to exploit](http://antirez.com/news/96), so make sure you understand the above and apply **at least** a firewalling layer. After the firewalling is in place, try to connect with `redis-cli` from an external host in order to prove yourself the instance is actually not reachable. + Using Redis from your application === From 71b438b03b7d7969d7c7b207abd1cc24c20dd296 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Nov 2015 17:56:52 +0100 Subject: [PATCH 0515/2314] Security info added into admin page. --- topics/admin.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/admin.md b/topics/admin.md index d99cfb83f2..42d5c95527 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -16,6 +16,7 @@ Redis setup hints + Use `daemonize no` when run under daemontools. 
+ Even if you have persistence disabled, Redis will need to perform RDB saves if you use replication, unless you use the new diskless replication feature, which is currently experimental. + If you are using replication, make sure that either your master has persistence enabled, or that it does not automatically restarts on crashes: slaves will try to be an exact copy of the master, so if a master restarts with an empty data set, slaves will be wiped as well. ++ By default Redis does not require **any authentication and listens to all the network interfaces**. This is a big security issue if you leave Redis exposed on the internet or other places where attackers can reach it. See for example [this attack](http://antirez.com/news/96) to see how dangerous it can be. Please check our [security page](/topics/security) and the [quick start](/topic/quickstart) for information about how to secure Redis. Running Redis on EC2 -------------------- From ce19ded4ad87df1ad033298f00cd33065fea5450 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 10 Nov 2015 18:00:31 +0100 Subject: [PATCH 0516/2314] Security page updated. --- topics/security.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/topics/security.md b/topics/security.md index 777c9bff58..6d5bfd51fe 100644 --- a/topics/security.md +++ b/topics/security.md @@ -87,7 +87,7 @@ Data encryption support Redis does not support encryption. In order to implement setups where trusted parties can access a Redis instance over the internet or other untrusted networks, an additional layer of protection should be implemented, -such as an SSL proxy. +such as an SSL proxy. We recommend [spiped](http://www.tarsnap.com/spiped.html). Disabling of specific commands --- @@ -157,15 +157,12 @@ prevent buffer overflows, format bugs and other memory corruption issues. 
However, the ability to control the server configuration using the **CONFIG** command makes the client able to change the working dir of the program and the name of the dump file. This allows clients to write RDB Redis files -at random paths, that is a security issue that may easily lead to the ability -to run untrusted code as the same user as Redis is running. +at random paths, that is [a security issue](http://antirez.com/news/96) that may easily lead to the ability to compromise the system and/or run untrusted code as the same user as Redis is running. Redis does not requires root privileges to run. It is recommended to run it as an unprivileged *redis* user that is only used for this purpose. The Redis authors are currently investigating the possibility of adding a new -configuration parameter to prevent **CONFIG SET/GET dir** and other similar run-time -configuration directives. This would prevent clients from forcing the server to -write Redis dump files at arbitrary locations. +configuration parameter to prevent **CONFIG SET/GET dir** and other similar run-time configuration directives. This would prevent clients from forcing the server to write Redis dump files at arbitrary locations. GPG key --- From 5984d88aaf591b264098d5aad8aae956473fa2d0 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Wed, 11 Nov 2015 15:37:29 +0100 Subject: [PATCH 0517/2314] Remove useless apostrophe --- commands/zrange.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/zrange.md b/commands/zrange.md index f1a842e610..663de5b2a2 100644 --- a/commands/zrange.md +++ b/commands/zrange.md @@ -11,7 +11,7 @@ They can also be negative numbers indicating offsets from the end of the sorted set, with `-1` being the last element of the sorted set, `-2` the penultimate element and so on. 
-`start` and `stop` are **inclusive ranges**, so for example `ZRANGE myzset 0 1`` +`start` and `stop` are **inclusive ranges**, so for example `ZRANGE myzset 0 1` will return both the first and the second element of the sorted set. Out of range indexes will not produce an error. From 06fcadf2b929995a369be0e2d3d460c4a0a5e45d Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Nov 2015 10:25:30 +0100 Subject: [PATCH 0518/2314] Redis Cluster Go client added. --- topics/cluster-tutorial.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index cb222863cf..7329015b4e 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -361,6 +361,7 @@ I'm aware of the following implementations: * The most used Java client, [Jedis](https://github.com/xetorthio/jedis) recently added support for Redis Cluster, see the *Jedis Cluster* section in the project README. * [StackExchange.Redis](https://github.com/StackExchange/StackExchange.Redis) offers support for C# (and should work fine with most .NET languages; VB, F#, etc) * [thunk-redis](https://github.com/thunks/thunk-redis) offers support for Node.js and io.js, it is a thunk/promise-based redis client with pipelining and cluster. +* [redis-go-cluster](https://github.com/chasex/redis-go-cluster) is an implementation of Redis Cluster for the Go language using the [Redigo library client](https://github.com/garyburd/redigo) as the base client. Implements MGET/MSET via result aggregation. * The `redis-cli` utility in the unstable branch of the Redis repository at GitHub implements a very basic cluster support when started with the `-c` switch. An easy way to test Redis Cluster is either to try any of the above clients From 64a56cfb4ee2054de6fe3198eb84f086da831fcc Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 27 Nov 2015 11:31:32 +0100 Subject: [PATCH 0519/2314] LDB documentation. 
--- commands/eval.md | 9 ++ topics/ldb.md | 210 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100644 topics/ldb.md diff --git a/commands/eval.md b/commands/eval.md index 0ef4860dfc..19b0b75434 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -599,6 +599,7 @@ The Redis Lua interpreter loads the following Lua libraries: * `cmsgpack` lib. * `bitop` lib. * `redis.sha1hex` function. +* `redis.breakpoint and redis.debug` function in the context of the [Redis Lua debugger](/topics/ldb). Every Redis instance is _guaranteed_ to have all the above libraries so you can be sure that the environment for your Redis scripts is always the same. @@ -793,3 +794,11 @@ The client library implementation should take one of the following approaches: already defined. If not, add `SCRIPT LOAD` commands on top of the pipeline as required, and use `EVALSHA` for all the `EVAL` calls. + +## Debugging Lua scripts + +Starting with Redis 3.2 (currently in beta), Redis has support for native +Lua debugging. The Redis Lua debugger is a remote debugger consisting of +a server, which is Redis itself, and a client, which is by default `redis-cli`. + +The Lua debugger is described in the [Lua scripts debugging](/topics/ldb) section of the Redis documentation. diff --git a/topics/ldb.md b/topics/ldb.md new file mode 100644 index 0000000000..d1a5448900 --- /dev/null +++ b/topics/ldb.md @@ -0,0 +1,210 @@ +# Redis Lua scripts debugger + +Starting with version 3.2 Redis includes a complete Lua debugger, that can be +used in order to make the task of writing complex Redis scripts much simpler. + +Because Redis 3.2 is still in beta, please download the `unstable` branch of Redis from Github and compile it in order to test the debugger. You can use Redis unstable in order to debug your scripts that you'll later run in a stable version of Redis, so the debugger is already usable in practical terms. 
+ +The Redis Lua debugger, codename LDB, has the following important features: + +* It uses a server-client model, so it's a remote debugger. The Redis server acts as the debugging server, while the default client is `redis-cli`. However other clients can be developed by following the simple protocol implemented by the server. +* By default every new debugging session is a forked session. It means that while the Redis Lua script is being debugged, the server does not block and is usable for development or in order to execute multiple debugging sessions in parallel. This also means that changes are **rolled back** after the script debugging session finished, so that's possible to restart a new debugging session again, using exactly the same Redis data set as the previous debugging session. +* An alternative synchronous (non forked) debugging model is available on demand, so that changes to the dataset can be retained. In this mode the server blocks for the time the debugging session is active. +* Support for step by step execution. +* Support for static and dynamic breakpoints. +* Support from logging to the debugged script into the debugger console. +* Inspection of Lua variables. +* Tracing of Redis commands executed by the script. +* Pretty printing of Redis and Lua values. +* Infinite loops and long execution detection, which simulates a breakpoint. + +Quick start +--- + +A simple way to get started with the Lua debugger is to watch this video +introduction: + + + +**Important note:** please make sure to avoid debugging Lua scripts using your Redis production server. Use a development server instead. Also note that using the synchronous debugging mode (which is NOT the default) results into the Redis server blocking for all the time the debugging session lasts. + +To start a new debugging session using `redis-cli` do the following steps: + +1. Create your script in some file with your preferred editor. 
Let's assume you are editing your Redis Lua script located at `/tmp/script.lua`. +2. Start a debugging session with: + + ./redis-cli --ldb --eval /tmp/script.lua + +Note that with the `--eval` option if `redis-cli` you can pass key names and arguments to the script, separated by a comma, like in the following example: + + ./redis-cli --ldb --eval /tmp/script.lua mykey somekey , arg1 arg2 + +You'll enter a special mode where `redis-cli` no longer accepts its normal +commands, but instead prints an help screen and passes the unmodified debugging +commands directly to Redis. + +The only commands which are not passed to the Redis debugger are: + +* `quit` -- this will terminate the debugging session. It's like removing all the breakpoints and using the `continue` debugging command. Moreover the command will exit from `redis-cli`. +* `restart` -- the debugging session will restart from scratch, **reloading the new version of the script from the file**. So a normal debugging cycle involves modifying the script after some debugging, and calling `restart` in order to start debugging again with the new script changes. +* `help` -- this command is passed to the Redis Lua debugger, that will print a list of commands like the following: + +``` +lua debugger> help +Redis Lua debugger help: +[h]elp Show this help. +[s]tep Run current line and stop again. +[n]ext Alias for step. +[c]continue Run till next breakpoint. +[l]list List source code around current line. +[l]list [line] List source code around [line]. + line = 0 means: current position. +[l]list [line] [ctx] In this form [ctx] specifies how many lines + to show before/after [line]. +[w]hole List all source code. Alias for 'list 1 1000000'. +[p]rint Show all the local variables. +[p]rint Show the value of the specified variable. + Can also show global vars KEYS and ARGV. +[b]reak Show all breakpoints. +[b]reak Add a breakpoint to the specified line. +[b]reak - Remove breakpoint from the specified line. 
+[b]reak 0 Remove all breakpoints. +[t]race Show a backtrace. +[e]eval Execute some Lua code (in a different callframe). +[r]edis Execute a Redis command. +[m]axlen [len] Trim logged Redis replies and Lua var dumps to len. + Specifying zero as means unlimited. +[a]abort Stop the execution of the script. In sync + mode dataset changes will be retained. + +Debugger functions you can call from Lua scripts: +redis.debug() Produce logs in the debugger console. +redis.breakpoint() Stop execution like if there was a breakpoing. + in the next line of code. +``` + +Note that when you start the debugger it will start in **steppign mode**. It will stop at the first line of the script that actually does something, and stops before executing this line. + +From this point you usually call `step` in order to execute the line and go to the next line. While you step Redis will show all the commands executed by the server like in the following example: + +``` +* Stopped at 1, stop reason = step over +-> 1 redis.call('ping') +lua debugger> step + ping + "+PONG" +* Stopped at 2, stop reason = step over +``` + +The `` and `` lines show the command executed by the line just +executed, and the reply from the server. Note that this happens only in stepping mode. If you use `continue` in order to execute the script till the next breakpoint, commands will not be dumped on the screen to prevent too much output. + +Termination of the debugging session +--- + +When the scripts terminates naturally, the debugging session ends and +`redis-cli` returns in its normal non debugging mode. You can restart the +session using the `restart` command as usually. + +Another way to stop a debugging session is just interrupting `redis-cli` +manually by pressing `Ctrl+C`. Note that also any event breaking the +connection between `redis-cli` and the `redis-server` will interrupt the +debugging session. + +All the forked debugging sessions are terminated when the server is shut +down. 
+ +Abbreviating debugging commands +--- + +Debugging can be a very repetitive task. For this reason every Redis +debugger command starts with a different character, and you can use the single +initial character in order to refer to the command. + +So for example instead of typing `step` you can just type `s`. + +Breakpoints +--- + +Adding and removing breakpoints is trivial as described in the online help. +Just use `b 1 2 3 4` to add a breakpoint in line 1, 2, 3, 4. +The command `b 0` removes all the breakpoints. Selected breakpoints can be +removed using as argument the line where the breakpoint we want to remove is, but prefixed by a minus sign. So for example `b -3` removes the breakpoint from line 3. + +Note that adding breakpoints to lines that Lua never executes, like declaration of local variables or comments, will not work. The breakpoint will be added but since this part of the script will never be executed, the program will never stop. + +Dynamic breakpoints +--- + +Using the `breakpoint` command it is possible to add breakpoints into specific +lines. However sometimes we want to stop the execution of the program only +when something special happens. In order to do so, you can use the +`redis.breakpoint()` function. When called it simulates a breakpoint in the +next line that will be executed. + + if counter > 10 then redis.breakpoint() end + +This feature is extremely useful when debugging, so that we can avoid to +continue the script execution manually multiple times until a given condition +is encountered. + +Synchronous mode +--- + +As explained previously, but default LDB uses forked sessions with rollback +of all the data changes operated by the script while it has being debugged. +Determinism is usually a good thing to have during debugging, so that successive +debugging sessions can be started without having to reset the database content +to its original state. 
+ +However for tracking certain bugs, you may want to retain the changes performed +to the key space by each debugging session. When this is a good idea you +should start the debugger using a special option in `redis-cli`. + + ./redis-cli --ldb --ldb-sync-mode --eval /tmp/script.lua + +**Note that the Redis server remains not reachable during the debugging session in this mode**, so use with care. + +In this special mode, the `abort` command can stop the script half-way taking the changes operated to the dataset. Note that this is different compared to ending the debugging session normally. If you just interrupt `redis-cli` the script will be fully executed and then the session terminated. Instead with `abort` you can interrupt the script execution in the middle and start a new debugging session if needed. + +Logging from scripts +--- + +The `redis.debug()` command is a powerful debugging facility that can be +called inside the Redis Lua script in order to log things into the debug +console: + +``` +lua debugger> list +-> 1 local a = {1,2,3} + 2 local b = false + 3 redis.debug(a,b) +lua debugger> continue + line 3: {1; 2; 3}, false +``` + +If the script is executed outside of a debugging session, `redis.debug()` has no effects at all. Note that the function accepts multiple arguments, that are separated by a comma and a space in the output. + +Tables and nested tables are displayed correctly in order to make values simple to observe for the programmer debugging the script. + +Inspecting the program state with `print` and `eval` +--- + +While the `redis.debug()` function can be used in order to print values +directly from within the Lua script, often it is useful to observe the local +variables of a program while stepping or when stopped into a breakpoint. + +The `print` command does just that, and performs lookup in the call frames +starting from the current one back to the previous ones, up to top-level. 
+This means that even if we are into a nested function inside a Lua script, +we can still use `print foo` to look at the value of `foo` in the context +of the calling function. + +The `eval` command executes small pieces of Lua scripts **but not in the context of the current call frame**, which is not possible with the current Lua internals. However you can use this command in order to test Lua functions. + +``` +lua debugger> e redis.sha1hex('foo') + "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" +``` + + From 7edc8f833c8408e00265729978bfcd6afead5ba6 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 27 Nov 2015 16:22:10 +0200 Subject: [PATCH 0520/2314] Documentation edits --- topics/ldb.md | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/topics/ldb.md b/topics/ldb.md index d1a5448900..de9f63dbae 100644 --- a/topics/ldb.md +++ b/topics/ldb.md @@ -12,7 +12,7 @@ The Redis Lua debugger, codename LDB, has the following important features: * An alternative synchronous (non forked) debugging model is available on demand, so that changes to the dataset can be retained. In this mode the server blocks for the time the debugging session is active. * Support for step by step execution. * Support for static and dynamic breakpoints. -* Support from logging to the debugged script into the debugger console. +* Support from logging the debugged script into the debugger console. * Inspection of Lua variables. * Tracing of Redis commands executed by the script. * Pretty printing of Redis and Lua values. 
@@ -35,7 +35,7 @@ To start a new debugging session using `redis-cli` do the following steps: ./redis-cli --ldb --eval /tmp/script.lua -Note that with the `--eval` option if `redis-cli` you can pass key names and arguments to the script, separated by a comma, like in the following example: +Note that with the `--eval` option of `redis-cli` you can pass key names and arguments to the script, separated by a comma, like in the following example: ./redis-cli --ldb --eval /tmp/script.lua mykey somekey , arg1 arg2 @@ -83,7 +83,7 @@ redis.breakpoint() Stop execution like if there was a breakpoing. in the next line of code. ``` -Note that when you start the debugger it will start in **steppign mode**. It will stop at the first line of the script that actually does something, and stops before executing this line. +Note that when you start the debugger it will start in **stepping mode**. It will stop at the first line of the script that actually does something before executing it. From this point you usually call `step` in order to execute the line and go to the next line. While you step Redis will show all the commands executed by the server like in the following example: @@ -103,7 +103,7 @@ Termination of the debugging session --- When the scripts terminates naturally, the debugging session ends and -`redis-cli` returns in its normal non debugging mode. You can restart the +`redis-cli` returns in its normal non-debugging mode. You can restart the session using the `restart` command as usually. Another way to stop a debugging session is just interrupting `redis-cli` @@ -139,8 +139,8 @@ Dynamic breakpoints Using the `breakpoint` command it is possible to add breakpoints into specific lines. However sometimes we want to stop the execution of the program only when something special happens. In order to do so, you can use the -`redis.breakpoint()` function. When called it simulates a breakpoint in the -next line that will be executed. 
+`redis.breakpoint()` function inside your Lua script. When called it simulates +a breakpoint in the next line that will be executed. if counter > 10 then redis.breakpoint() end @@ -159,11 +159,11 @@ to its original state. However for tracking certain bugs, you may want to retain the changes performed to the key space by each debugging session. When this is a good idea you -should start the debugger using a special option in `redis-cli`. +should start the debugger using a special option, `ldb-sync-mode`, in `redis-cli`. - ./redis-cli --ldb --ldb-sync-mode --eval /tmp/script.lua + ./redis-cli --ldb-sync-mode --eval /tmp/script.lua -**Note that the Redis server remains not reachable during the debugging session in this mode**, so use with care. +**Note that the Redis server will be unreachable during the debugging session in this mode**, so use with care. In this special mode, the `abort` command can stop the script half-way taking the changes operated to the dataset. Note that this is different compared to ending the debugging session normally. If you just interrupt `redis-cli` the script will be fully executed and then the session terminated. Instead with `abort` you can interrupt the script execution in the middle and start a new debugging session if needed. @@ -198,13 +198,12 @@ The `print` command does just that, and performs lookup in the call frames starting from the current one back to the previous ones, up to top-level. This means that even if we are into a nested function inside a Lua script, we can still use `print foo` to look at the value of `foo` in the context -of the calling function. +of the calling function. When called without a variable name, `print` will +print all variables and their respective values. -The `eval` command executes small pieces of Lua scripts **but not in the context of the current call frame**, which is not possible with the current Lua internals. However you can use this command in order to test Lua functions. 
+The `eval` command executes small pieces of Lua scripts **outside the context of the current call frame** (evaluating inside the context of the current call frame is not possible with the current Lua internals). However you can use this command in order to test Lua functions. ``` lua debugger> e redis.sha1hex('foo') "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" ``` - - From 4fbe1bb80fc0484792f97da410480cfc17ac1f70 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 29 Nov 2015 16:41:49 +0200 Subject: [PATCH 0521/2314] List of commands allowed for subscriber + typo "reply"->"replies" --- topics/pubsub.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/topics/pubsub.md b/topics/pubsub.md index f4294dd832..3ed37ee138 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -23,9 +23,12 @@ to all the subscribed clients. A client subscribed to one or more channels should not issue commands, although it can subscribe and unsubscribe to and from other channels. -The reply of the `SUBSCRIBE` and `UNSUBSCRIBE` operations are sent in -the form of messages, so that the client can just read a coherent stream -of messages where the first element indicates the type of message. +The replies to subscription and unsubscription operations are sent in +the form of messages, so that the client can just read a coherent +stream of messages where the first element indicates the type of +message. The commands that are allowed in the context of a subscribed +client are `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, `PUNSUBSCRIBE`, +`PING` and `QUIT`. 
## Format of pushed messages From 51d1d51801c7ca7dd6746840bb72427255a78f45 Mon Sep 17 00:00:00 2001 From: t-mw Date: Tue, 1 Dec 2015 17:32:25 +0000 Subject: [PATCH 0522/2314] Clarify which events clear EXPIRE timeout --- commands/expire.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/commands/expire.md b/commands/expire.md index 250692b743..192ffb6860 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -3,8 +3,9 @@ After the timeout has expired, the key will automatically be deleted. A key with an associated timeout is often said to be _volatile_ in Redis terminology. -The timeout is cleared only when the key is removed using the `DEL` command or -overwritten using the `SET` or `GETSET` commands. +The timeout will only be cleared by commands that delete or overwrite the +contents of the key, including `DEL`, `SET`, `GETSET` and all the `*STORE` +commands. This means that all the operations that conceptually _alter_ the value stored at the key without replacing it with a new one will leave the timeout untouched. For instance, incrementing the value of a key with `INCR`, pushing a new value From 7ebf3adb4c288a29b38e4de14f275406473ed5c0 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 11 Dec 2015 09:52:24 +0100 Subject: [PATCH 0523/2314] Fix stated default values for hash-max-ziplist. --- topics/memory-optimization.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/memory-optimization.md b/topics/memory-optimization.md index 2267439efc..b7e61c8dbf 100644 --- a/topics/memory-optimization.md +++ b/topics/memory-optimization.md @@ -8,8 +8,8 @@ Since Redis 2.2 many data types are optimized to use less space up to a certain This is completely transparent from the point of view of the user and API. Since this is a CPU / memory trade off it is possible to tune the maximum number of elements and maximum element size for special encoded types using the following redis.conf directives. 
- hash-max-zipmap-entries 64 (hash-max-ziplist-entries for Redis >= 2.6) - hash-max-zipmap-value 512 (hash-max-ziplist-value for Redis >= 2.6) + hash-max-zipmap-entries 512 (hash-max-ziplist-entries for Redis >= 2.6) + hash-max-zipmap-value 64 (hash-max-ziplist-value for Redis >= 2.6) list-max-ziplist-entries 512 list-max-ziplist-value 64 zset-max-ziplist-entries 128 From 7904013cd8cad3feda0ffed410f765de14299a24 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 13 Dec 2015 10:42:11 +0100 Subject: [PATCH 0524/2314] Instructions about using Redis Cluster with Docker. --- topics/cluster-tutorial.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 7329015b4e..a9f47f03d5 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -64,6 +64,21 @@ The cluster bus uses a different, binary protocol, for node to node data exchange, which is more suited to exchange information between nodes using little bandwidth and processing time. +Redis Cluster and Docker +--- + +Currently Redis Cluster does not support NATted environments and in general +environments where IP addresses or TCP ports are remapped. + +Docker uses a technique called *port mapping*: programs running inside Docker +containers may be exposed with a different port compared to the one the +program believes to be using. This is useful in order to run multiple +containers using the same ports, at the same time, in the same server. + +In order to make Docker compatible with Redis Cluster you need to use +the **host networking mode** of Docker. Please check the `--net=host` option +in the [Docker documentation](https://docs.docker.com/engine/userguide/networking/dockernetworks/) for more information. 
+ Redis Cluster data sharding --- From 3d688157599701104d2300e8c1776397f97ede9f Mon Sep 17 00:00:00 2001 From: Anton Davydov Date: Fri, 4 Dec 2015 00:04:00 +0300 Subject: [PATCH 0525/2314] Add mruby redis client to list --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index cbea891fab..30961003fe 100644 --- a/clients.json +++ b/clients.json @@ -1312,5 +1312,14 @@ "description": "Modern C++11 Redis client based on boost::asio", "authors": ["simon_ninon"], "active": true + }, + + { + "name": "mruby-redis", + "language": "mruby", + "repository": "https://github.com/matsumoto-r/mruby-redis", + "description": "Redis class for mruby based on Hiredis", + "authors": [], + "active": true } ] From 8e983b5d24b2b78c4d472e8c7bebfc3c00e1f11c Mon Sep 17 00:00:00 2001 From: "MATSUMOTO, Ryosuke" Date: Wed, 16 Dec 2015 13:43:43 +0900 Subject: [PATCH 0526/2314] Update mruby redis client of list --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 30961003fe..428fe6ce5e 100644 --- a/clients.json +++ b/clients.json @@ -1319,7 +1319,7 @@ "language": "mruby", "repository": "https://github.com/matsumoto-r/mruby-redis", "description": "Redis class for mruby based on Hiredis", - "authors": [], + "authors": ["matsumotory"], "active": true } ] From 76c58f421f80469d9f18630f1a1a558f8f133623 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 19 Dec 2015 06:49:55 -0800 Subject: [PATCH 0527/2314] Fixes a couple of typos --- commands/eval.md | 4 ++-- topics/ldb.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 19b0b75434..dde33f3ced 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -149,8 +149,8 @@ As you can see 3.333 is converted into 3, and the *bar* string is never returned There are two helper functions to return Redis types from Lua. 
-* `redis.error_reply(error_string)` returns an error reply. This function simply returns the single field table with the `err` field set to the specified string for you. -* `redis.status_reply(status_string)` returns a status reply. This function simply returns the single field table with the `ok` field set to the specified string for you. +* `redis.error_reply(error_string)` returns an error reply. This function simply returns a single field table with the `err` field set to the specified string for you. +* `redis.status_reply(status_string)` returns a status reply. This function simply returns a single field table with the `ok` field set to the specified string for you. There is no difference between using the helper functions or directly returning the table with the specified format, so the following two forms are equivalent: diff --git a/topics/ldb.md b/topics/ldb.md index de9f63dbae..04800238a9 100644 --- a/topics/ldb.md +++ b/topics/ldb.md @@ -79,8 +79,8 @@ Redis Lua debugger help: Debugger functions you can call from Lua scripts: redis.debug() Produce logs in the debugger console. -redis.breakpoint() Stop execution like if there was a breakpoing. - in the next line of code. +redis.breakpoint() Stop execution as if there was a breakpoint in the + next line of code. ``` Note that when you start the debugger it will start in **stepping mode**. It will stop at the first line of the script that actually does something before executing it. From 13b44c0a20e04983caba6013e8fecd7d85850009 Mon Sep 17 00:00:00 2001 From: Paul Kuruvilla Date: Tue, 22 Dec 2015 14:57:01 +0530 Subject: [PATCH 0528/2314] Fix active/passive terminology in EXPIRE docs, fixes #491 --- commands/expire.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/expire.md b/commands/expire.md index 192ffb6860..db9add01d1 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -125,7 +125,7 @@ lasting for 1000 seconds. 
Redis keys are expired in two ways: a passive way, and an active way. -A key is actively expired simply when some client tries to access it, and the +A key is passively expired simply when some client tries to access it, and the key is found to be timed out. Of course this is not enough as there are expired keys that will never be From c103dc0b409c3a8ae4ebb8741c8db8fba2a5f651 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 23 Dec 2015 06:45:12 -0800 Subject: [PATCH 0529/2314] Added 3.2's `master` type to the `CLIENT KILL` command --- commands.json | 2 +- commands/client-kill.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index 0dc5fdada2..413a880cc7 100644 --- a/commands.json +++ b/commands.json @@ -172,7 +172,7 @@ { "command": "TYPE", "type": "enum", - "enum": ["normal", "slave", "pubsub"], + "enum": ["normal", "master", "slave", "pubsub"], "optional": true }, { diff --git a/commands/client-kill.md b/commands/client-kill.md index 72d8470ffb..879231ac05 100644 --- a/commands/client-kill.md +++ b/commands/client-kill.md @@ -14,7 +14,7 @@ instead of killing just by address. The following filters are available: * `CLIENT KILL ADDR ip:port`. This is exactly the same as the old three-arguments behavior. * `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field, which was introduced in the `CLIENT LIST` command starting from Redis 2.8.12. -* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `slave`, `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. +* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `slave` and `pubsub` (the `master` type is available from v3.2). This closes the connections of **all the clients** in the specified class. 
Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. * `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command. It is possible to provide multiple filters at the same time. The command will handle multiple filters via logical AND. For example: From 53404b15d28cc37b031df1239b6400f78b478ea9 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 23 Dec 2015 07:57:09 -0800 Subject: [PATCH 0530/2314] Added 3.2's `CLIENT REPLY` command --- commands.json | 13 +++++++++++++ commands/client-reply.md | 13 +++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 commands/client-reply.md diff --git a/commands.json b/commands.json index 413a880cc7..1c5ee6e74e 100644 --- a/commands.json +++ b/commands.json @@ -215,6 +215,19 @@ "since": "2.9.50", "group": "server" }, + "CLIENT REPLY": { + "summary": "Instruct the server whether to reply to commands", + "complexity": "O(1)", + "arguments": [ + { + "name": "reply-mode", + "type": "enum", + "enum": ["ON", "OFF", "SKIP"] + } + ], + "since": "3.2", + "group": "server" + }, "CLIENT SETNAME": { "summary": "Set the current connection name", "complexity": "O(1)", diff --git a/commands/client-reply.md b/commands/client-reply.md new file mode 100644 index 0000000000..b73a9d66d5 --- /dev/null +++ b/commands/client-reply.md @@ -0,0 +1,13 @@ +Sometimes it can be useful for clients to completely disable replies from the Redis server. For example when the client sends fire and forget commands or performs a mass loading of data, or in caching contexts where new data is streamed constantly. In such contexts to use server time and bandwidth in order to send back replies to clients, which are going to be ignored, is a shame. + +The `CLIENT REPLY` command controls whether the server will reply the client's commands. 
The following modes are available: + +* `ON`. This is the default mode in which the server returns a reply to every command. +* `OFF`. In this mode the server will not reply to client commands. +* `SKIP`. This mode skips the reply of command immediately after it. + +@return + +When called with either `OFF` or `SKIP` subcommands, no reply is made. When called with `ON`: + +@simple-string-reply: `OK` if the connection name was successfully set. From bb4ec232f71994562c0b22568cb9f9f698ec37a3 Mon Sep 17 00:00:00 2001 From: Zach Tellman Date: Sat, 26 Dec 2015 14:26:19 -0800 Subject: [PATCH 0531/2314] Remove Aleph from client implementation list As of the latest release, it only provides TCP, HTTP, and UDP protocol implementations. --- clients.json | 8 -------- 1 file changed, 8 deletions(-) diff --git a/clients.json b/clients.json index 428fe6ce5e..1807a9bf36 100644 --- a/clients.json +++ b/clients.json @@ -18,14 +18,6 @@ "recommended": true, "active": true }, - { - "name": "aleph", - "language": "Clojure", - "repository": "https://github.com/ztellman/aleph", - "description": "Redis client build on top of lamina", - "authors": ["ztellman"], - "active": true - }, { "name": "CL-Redis", "language": "Common Lisp", From a52b3b16f3bcf7aabd2602e6ed09fb4afe0e6bf2 Mon Sep 17 00:00:00 2001 From: Ovan Crone Date: Mon, 21 Sep 2015 14:39:24 -0500 Subject: [PATCH 0532/2314] Added reference to ScarletLock --- topics/distlock.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index 14d64f6c3b..f9d304794b 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -33,6 +33,7 @@ already available that can be used for reference. * [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). * [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). * [RedLock.net](https://github.com/samcook/RedLock.net) (C#/.NET implementation). 
Includes async and lock extension support. +* [ScarletLock](https://github.com/psibernetic/scarletlock) (C# .NET implementation with configurable datastore) * [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. Safety and Liveness guarantees From 78750ae45768216839c58fc4328dd91f860638ce Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 29 Dec 2015 17:27:26 +0200 Subject: [PATCH 0533/2314] Update client-reply.md Removed "is a shame", which is a shame because this how @antirez talks/writes :) Removed copy-and-paste crap. --- commands/client-reply.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/client-reply.md b/commands/client-reply.md index b73a9d66d5..f2c3ed8b42 100644 --- a/commands/client-reply.md +++ b/commands/client-reply.md @@ -1,4 +1,4 @@ -Sometimes it can be useful for clients to completely disable replies from the Redis server. For example when the client sends fire and forget commands or performs a mass loading of data, or in caching contexts where new data is streamed constantly. In such contexts to use server time and bandwidth in order to send back replies to clients, which are going to be ignored, is a shame. +Sometimes it can be useful for clients to completely disable replies from the Redis server. For example when the client sends fire and forget commands or performs a mass loading of data, or in caching contexts where new data is streamed constantly. In such contexts to use server time and bandwidth in order to send back replies to clients, which are going to be ignored, is considered wasteful. The `CLIENT REPLY` command controls whether the server will reply the client's commands. The following modes are available: @@ -10,4 +10,4 @@ The `CLIENT REPLY` command controls whether the server will reply the client's c When called with either `OFF` or `SKIP` subcommands, no reply is made. 
When called with `ON`: -@simple-string-reply: `OK` if the connection name was successfully set. +@simple-string-reply: `OK`. From c6b63dbfeaadc33a03aa7965bad74ed7a96e0925 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 29 Dec 2015 16:43:35 +0100 Subject: [PATCH 0534/2314] nokogiri is required for remarkdown --- .gems | 1 + 1 file changed, 1 insertion(+) diff --git a/.gems b/.gems index 6f5dc2c08e..bb185cab96 100644 --- a/.gems +++ b/.gems @@ -1,2 +1,3 @@ batch -v 1.0.4 redcarpet -v 3.3.2 +nokogiri -v 1.6.7.1 From f96dfdf90cd90b7ac5d504cab80457c8a17858f0 Mon Sep 17 00:00:00 2001 From: Holger Woltersdorf Date: Tue, 29 Dec 2015 17:03:53 +0100 Subject: [PATCH 0535/2314] Added hollodotme/readis --- tools.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools.json b/tools.json index 78c0510e9a..330c3921ca 100644 --- a/tools.json +++ b/tools.json @@ -48,6 +48,14 @@ "description": "Python persistent object library based on Redis.", "authors": ["hongminhee"] }, + { + "name": "readis", + "language": "PHP", + "url": "https://readis.hollo.me", + "repository": "https://github.com/hollodotme/readis", + "description": "Lightweight web frontend in PHP for reading data, stats and config from multiple redis servers.", + "authors": ["hollodotme"] + }, { "name": "Redis-objects", "language": "Ruby", From 53687abc8a8ff79eda99fb7957ec8352582fd689 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 30 Dec 2015 12:19:09 -0800 Subject: [PATCH 0536/2314] Adds SCRIPT DEBUG --- commands/script-debug.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 commands/script-debug.md diff --git a/commands/script-debug.md b/commands/script-debug.md new file mode 100644 index 0000000000..d3fe44b4a8 --- /dev/null +++ b/commands/script-debug.md @@ -0,0 +1,26 @@ +Set the debug mode for subsequent scripts executed with `EVAL`. 
Redis includes a +complete Lua debugger, codename LDB, that can be used to make the task of +writing complex scripts much simpler. In debug mode Redis acts as a remote +debugging server and a client, such as `redis-cli`, can execute scripts step by +step, set breakpoints, inspect variables and more - for additional information +about the Redis Lua scripts debugger refer to [LDB's +documentation](/topics/ldb). + +**Important note: avoid debugging Lua scripts using your Redis production +**server. Use a development server instead. + +LDB can be enabled in one of two modes: asynchronous or synchronous. In +asynchronous mode the server creates a forked debugging session that does not +block and all changes to the data are **rolled back** after the session +finishes, so debugging can be restarted using the same initial state. The +alternative synchronous debug mode blocks the server while the debugging session +is active and retains all changes to the data set once it ends. + +* `YES`. Enable asynchronous debugging of Lua scripts (non-blocking, changes are discarded). +* `SYNC`. Enable synchronous debugging of Lua scripts (blocking and save changes). +* `NO`. Disables Lua scripts debugging. This is the default for new connections. + +@return + +@simple-string-reply: `OK`. 
+ From 0cbab0cfea956d0eb9e35fc8b457b48a6d7b6a16 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 31 Dec 2015 05:24:41 -0800 Subject: [PATCH 0537/2314] Add LDB to allowed words --- wordlist | 1 + 1 file changed, 1 insertion(+) diff --git a/wordlist b/wordlist index 1001731b83..0f5e354a88 100644 --- a/wordlist +++ b/wordlist @@ -51,6 +51,7 @@ IRC Inline JPEG JSON +LDB LF LLOOGG LRU From 2ebdf5d18e83c8ae588732960cfc7b73daabacde Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 31 Dec 2015 05:32:56 -0800 Subject: [PATCH 0538/2314] Wordsmithing --- commands/script-debug.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/commands/script-debug.md b/commands/script-debug.md index d3fe44b4a8..1b52217adf 100644 --- a/commands/script-debug.md +++ b/commands/script-debug.md @@ -3,11 +3,10 @@ complete Lua debugger, codename LDB, that can be used to make the task of writing complex scripts much simpler. In debug mode Redis acts as a remote debugging server and a client, such as `redis-cli`, can execute scripts step by step, set breakpoints, inspect variables and more - for additional information -about the Redis Lua scripts debugger refer to [LDB's -documentation](/topics/ldb). +about LDB refer to the [Redis Lua debugger](/topics/ldb) page. -**Important note: avoid debugging Lua scripts using your Redis production -**server. Use a development server instead. +**Important note:** avoid debugging Lua scripts using your Redis production +server. Use a development server instead. LDB can be enabled in one of two modes: asynchronous or synchronous. In asynchronous mode the server creates a forked debugging session that does not @@ -16,9 +15,9 @@ finishes, so debugging can be restarted using the same initial state. The alternative synchronous debug mode blocks the server while the debugging session is active and retains all changes to the data set once it ends. -* `YES`. 
Enable asynchronous debugging of Lua scripts (non-blocking, changes are discarded). -* `SYNC`. Enable synchronous debugging of Lua scripts (blocking and save changes). -* `NO`. Disables Lua scripts debugging. This is the default for new connections. +* `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are discarded). +* `SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to data). +* `NO`. Disables scripts debug mode. @return From ba046799d5b9a255d9b5c1c22ebb266e98069dfa Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 31 Dec 2015 11:39:13 -0800 Subject: [PATCH 0539/2314] Add SCRIPT DEBUG --- commands.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/commands.json b/commands.json index 1c5ee6e74e..6d7994d488 100644 --- a/commands.json +++ b/commands.json @@ -1934,6 +1934,19 @@ "since": "1.0.0", "group": "set" }, + "SCRIPT DEBUG": { + "summary": "Set the debug mode for executed scripts.", + "complexity": "O(1)", + "arguments": [ + { + "name": "mode", + "type": "enum", + "enum": ["ON", "SYNC", "OFF"] + } + ], + "since": "3.2.0", + "group": "scripting" + }, "SCRIPT EXISTS": { "summary": "Check existence of scripts in the script cache.", "complexity": "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation).", From 88c9f934d4336e1de45d013535661a3ed651e551 Mon Sep 17 00:00:00 2001 From: Alex Grintsvayg Date: Fri, 1 Jan 2016 17:39:52 -0500 Subject: [PATCH 0540/2314] Remove outdated PHP libraries These libraries are no longer maintained. They all have no new commits in over 2 years. 
--- clients.json | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/clients.json b/clients.json index 1807a9bf36..e71143357e 100644 --- a/clients.json +++ b/clients.json @@ -401,21 +401,12 @@ "active": true }, - { - "name": "RedisServer", - "language": "PHP", - "repository": "https://github.com/e-oz/Memory/blob/master/lib/Jamm/Memory/RedisServer.php", - "description": "Standalone and full-featured class for Redis in PHP", - "authors": ["eugeniyoz"] - }, - { "name": "Redisent", "language": "PHP", "repository": "https://github.com/jdp/redisent", "description": "", - "authors": ["justinpoliey"], - "active": true + "authors": ["justinpoliey"] }, { @@ -423,25 +414,10 @@ "language": "PHP", "repository": "https://github.com/colinmollenhour/credis", "description": "Lightweight, standalone, unit-tested fork of Redisent which wraps phpredis for best performance if available.", - "authors": ["colinmollenhour"] - }, - - { - "name": "Kdyby/Redis", - "language": "PHP", - "repository": "https://github.com/kdyby/redis", - "description": "Powerful Redis storage for Nette Framework", + "authors": ["colinmollenhour"], "active": true }, - { - "name": "phpish/redis", - "language": "PHP", - "repository": "https://github.com/phpish/redis", - "description": "Simple Redis client in PHP", - "authors": ["sandeepshetty"] - }, - { "name": "PHP Sentinel Client", "language": "PHP", From 30714cc45a9fe2067650f5720c2a6e11e684d813 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 5 Jan 2016 06:28:10 -0800 Subject: [PATCH 0541/2314] Documents 3.2 pipelined migrate --- commands.json | 10 +++++++++- commands/migrate.md | 6 ++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/commands.json b/commands.json index 1c5ee6e74e..1d97cf1996 100644 --- a/commands.json +++ b/commands.json @@ -1467,7 +1467,8 @@ }, { "name": "key", - "type": "key" + "type": "enum", + "enum": 
["key", "\"\""] }, { "name": "destination-db", @@ -1488,6 +1489,13 @@ "type": "enum", "enum": ["REPLACE"], "optional": true + }, + { + "name": "key", + "command": "KEYS", + "type": "key", + "variadic": true, + "optional": true } ], "since": "2.6.0", diff --git a/commands/migrate.md b/commands/migrate.md index fda904a99d..241b60ab26 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -5,7 +5,9 @@ exist in the target instance. The command is atomic and blocks the two instances for the time required to transfer the key, at any given time the key will appear to exist in a given -instance or in the other instance, unless a timeout error occurs. +instance or in the other instance, unless a timeout error occurs. In 3.2 and +above, multiple keys can be pipelined in a single call to `MIGRATE` by passing +the empty string ("") as key and adding the `KEYS` clause. The command internally uses `DUMP` to generate the serialized version of the key value, and `RESTORE` in order to synthesize the key in the target instance. @@ -42,7 +44,7 @@ On success OK is returned. * `COPY` -- Do not remove the key from the local instance. * `REPLACE` -- Replace existing key on the remote instance. -`COPY` and `REPLACE` will be available in 3.0 and are not available in 2.6 or 2.8 +`COPY` and `REPLACE` are available only in 3.0 and above. 
@return From 572d18cf3e4351e0d6f2fdfb86ea50c92191a842 Mon Sep 17 00:00:00 2001 From: xSky Date: Thu, 7 Jan 2016 13:29:49 +0800 Subject: [PATCH 0542/2314] update --- clients.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 1807a9bf36..9e0d898bbb 100644 --- a/clients.json +++ b/clients.json @@ -1291,8 +1291,9 @@ { "name": "xredis", "language": "C++", + "url": "http://xredis.0xsky.com/", "repository": "https://github.com/0xsky/xredis", - "description": "Redis C++ client with data slice storage and connection pool support, requires hiredis only", + "description": "Redis C++ client with data slice storage, connection pool, master slave connection, read/write separation; requires hiredis only", "authors": ["0xsky"], "active": true }, From ca949409ce5e0c706d6e34bc88a9f38115b1559a Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 7 Jan 2016 06:43:43 -0800 Subject: [PATCH 0543/2314] Adds Redis plugin for ZeroBrane Studio --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 330c3921ca..4b08e923e3 100644 --- a/tools.json +++ b/tools.json @@ -548,5 +548,12 @@ "repository": "https://github.com/hedisdb/hedis", "description": "Hedis can retrieve data from **ANY** database directly via Redis", "authors": ["kewang"] + }, + { + "name": "Redis plugin for ZeroBrane Studio Lua IDE", + "language": "Lua", + "repository": "https://github.com/pkulchenko/ZeroBranePackage", + "description": "Enables support for the Redis Lua API, provides remote script execution and debugging", + "authors": ["zerobrane"] } ] From 830c480d87d1b3c86557f1898413ab9261934cd4 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 7 Jan 2016 06:47:19 -0800 Subject: [PATCH 0544/2314] Proper cAPITALIZATION for Redis --- tools.json | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tools.json b/tools.json index 
4b08e923e3..a4f2fd3561 100644 --- a/tools.json +++ b/tools.json @@ -38,7 +38,7 @@ "name": "Kombu", "language": "Python", "repository": "https://github.com/celery/kombu", - "description": "Python AMQP Framework with redis support", + "description": "Python AMQP Framework with Redis support", "authors": [] }, { @@ -53,7 +53,7 @@ "language": "PHP", "url": "https://readis.hollo.me", "repository": "https://github.com/hollodotme/readis", - "description": "Lightweight web frontend in PHP for reading data, stats and config from multiple redis servers.", + "description": "Lightweight web frontend in PHP for reading data, stats and config from multiple Redis servers.", "authors": ["hollodotme"] }, { @@ -81,14 +81,14 @@ "name": "Rdb-parser", "language": "Javascript", "repository": "https://github.com/pconstr/rdb-parser", - "description": "node.js asynchronous streaming parser for redis RDB database dumps.", + "description": "node.js asynchronous streaming parser for Redis RDB database dumps.", "authors": ["pconstr"] }, { "name": "Redis-sync", "language": "Javascript", "repository": "https://github.com/pconstr/redis-sync", - "description": "A node.js redis replication slave toolkit", + "description": "A node.js Redis replication slave toolkit", "authors": ["pconstr"] }, { @@ -130,14 +130,14 @@ "name": "Redmon", "language": "Ruby", "repository": "https://github.com/steelThread/redmon", - "description": "A web interface for managing redis: cli, admin, and live monitoring.", + "description": "A web interface for managing Redis: cli, admin, and live monitoring.", "authors": ["steel_thread"] }, { "name": "Rollout", "language": "Ruby", "repository": "https://github.com/FetLife/rollout", - "description": "Conditionally roll out features with redis.", + "description": "Conditionally roll out features with Redis.", "authors": ["jamesgolick"] }, { @@ -180,14 +180,14 @@ "name": "Omhiredis", "language": 
"C", "repository": "http://www.rsyslog.com/doc/build_from_repo.html", - "description": "redis output plugin for rsyslog (rsyslog dev, and rsyslog head).", + "description": "Redis output plugin for rsyslog (rsyslog dev, and rsyslog head).", "authors": ["taotetek"] }, { "name": "Mod_redis", "language": "C", "repository": "https://github.com/sneakybeaky/mod_redis", - "description": "An Apache HTTPD module for speaking to redis via HTTP", + "description": "An Apache HTTPD module for speaking to Redis via HTTP", "authors": [] }, { @@ -257,7 +257,7 @@ "name": "Recurrent", "language": "Javascript", "repository": "https://github.com/pconstr/recurrent", - "description": "A redis-backed manager of recurrent jobs, for node.js", + "description": "A Redis-backed manager of recurrent jobs, for node.js", "authors": ["pconstr"] }, { @@ -293,7 +293,7 @@ "name": "redis-tcl", "language": "Tcl", "repository" : "https://github.com/bradvoth/redis-tcl", - "description" : "Tcl library largely copied from the redis test tree, modified for minor bug fixes and expanded pub/sub capabilities", + "description" : "Tcl library largely copied from the Redis test tree, modified for minor bug fixes and expanded pub/sub capabilities", "authors" : ["bradvoth","antirez"] }, { @@ -379,7 +379,7 @@ "name": "Redis-RdbParser", "language": "Perl", "repository": "https://github.com/flygoast/Redis-RdbParser", - "description": "Redis-RdbParser is a streaming parser for redis RDB database dumps.", + "description": "Redis-RdbParser is a streaming parser for Redis RDB database dumps.", "authors": [] }, { @@ -394,7 +394,7 @@ "language": "Web", "repository": "https://github.com/Redsmin/redsmin", "url":"https://redsmin.com/", - "description": "A fully featured Redis GUI for managing and monitoring redis.", + "description": "A fully featured Redis GUI for managing and monitoring Redis.", "authors": ["fgribreau"] }, { @@ -486,7 +486,7 @@ "language": 
"Python", "url": "https://github.com/no13bus/redispapa", "repository": "https://github.com/no13bus/redispapa", - "description": "RedisPAPA is a redis monitor which watch the redis-info by using flask, angular, socket.io", + "description": "RedisPAPA is a Redis monitor which watches `INFO` by using flask, angular, socket.io", "authors": ["no13bus"] }, { @@ -502,7 +502,7 @@ "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", "repository": "https://github.com/zhengshuxin/acl/tree/master/app/redis_tools/redis_builder", - "description": "A C++ redis tool to create and manage a redis cluster, basing on acl redis lib in https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", + "description": "A C++ Redis tool to create and manage a Redis cluster, basing on acl Redis lib in https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "authors": ["zhengshuxin"] }, { @@ -524,7 +524,7 @@ "language": "C", "url": "http://www.jacketzhong.com/?p=220", "repository": "https://github.com/jacket-code/redisPlatform", - "description": "A rpc platform that base on redis, You can use it to do a lot of things, it can be a game server", + "description": "A rpc platform that base on Redis, You can use it to do a lot of things, it can be a game server", "authors": ["jacketzhong"] }, { From 96ae3efe14f34ab5beb2a4be016a797bcd1831a9 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 7 Jan 2016 08:20:38 -0800 Subject: [PATCH 0545/2314] More information in debugging clients --- topics/ldb.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/topics/ldb.md b/topics/ldb.md index 04800238a9..4749e28b69 100644 --- a/topics/ldb.md +++ b/topics/ldb.md @@ -207,3 +207,33 @@ The `eval` command executes small pieces of Lua scripts **outside the context of lua debugger> e 
redis.sha1hex('foo') "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" ``` + +Debugging clients +--- + +LDB uses the client-server model where the Redis servers acts as a debugging server that communicates using [RESP](/topics/protocol). While `redis-cli` is the default debug client, any [client](/clients) can be used for debugging as long as it meets one of the following conditions: + +1. The client provides a native interface for setting the debug mode and controlling the debug session. +2. The client provides an interface for sending arbitrary commands over RESP. +3. The client allows sending raw messages to the Redis server. + +For example, the [Redis plugin](https://redislabs.com/blog/zerobrane-studio-plugin-for-redis-lua-scripts) for [ZeroBrane Studio](http://studio.zerobrane.com/) integrates with LDB using [redis-lua](https://github.com/nrk/redis-lua). The following Lua code is a simplified example of how the plugin achieves that: + +```Lua +local redis = require 'redis' + +-- add LDB's Continue command +redis.commands['ldbcontinue'] = redis.command('C') + +-- script to be debugged +local script = [[ + local x, y = tonumber(ARGV[1]), tonumber(ARGV[2]) + local result = x * y + return result +]] + +local client = redis.connect('127.0.0.1', 6379) +client:script("DEBUG", "YES") +print(unpack(client:eval(script, 0, 6, 9))) +client:ldbcontinue() +``` From 079c0ec0177a4aed491eb4dcaae3be696db9b366 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 12 Jan 2016 12:53:32 -0800 Subject: [PATCH 0546/2314] Adds details about NOKEY reply --- commands/migrate.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/commands/migrate.md b/commands/migrate.md index 241b60ab26..62bb2c3a61 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -37,7 +37,9 @@ When any other error is returned (starting with `ERR`) `MIGRATE` guarantees that the key is still only present in the originating instance (unless a key with the same name was also 
_already_ present on the target instance). -On success OK is returned. +If there are no keys to migrate in the source instance NOKEY is returned. +Because missing keys are possible in normal conditions, from expiry for example, +NOKEY isn't an error. ## Options @@ -48,4 +50,5 @@ On success OK is returned. @return -@simple-string-reply: The command returns OK on success. +@simple-string-reply: The command returns OK on success, or NOKEY if no keys were +found in the source instance. From 4a3a777e25a459cfea021fe61409fed91611e938 Mon Sep 17 00:00:00 2001 From: Raj Shah Date: Tue, 17 Nov 2015 11:27:01 -0800 Subject: [PATCH 0547/2314] Add Pottery Python Redis client --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 1807a9bf36..c20e2123e1 100644 --- a/clients.json +++ b/clients.json @@ -1313,5 +1313,14 @@ "description": "Redis class for mruby based on Hiredis", "authors": ["matsumotory"], "active": true + }, + + { + "name": "Pottery", + "language": "Python", + "repository": "https://github.com/brainix/pottery", + "description": "High level Pythonic dict, set, and list like containers around Redis data types (Python 3 only)", + "authors": ["brainix"], + "active": true } ] From 3793ac4a877c36035eddb8109b46590133331ab6 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 13 Jan 2016 00:47:09 -0800 Subject: [PATCH 0548/2314] Adds backticks to NOKEY --- commands/migrate.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/commands/migrate.md b/commands/migrate.md index 62bb2c3a61..388920841a 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -37,9 +37,9 @@ When any other error is returned (starting with `ERR`) `MIGRATE` guarantees that the key is still only present in the originating instance (unless a key with the same name was also _already_ present on the target instance). -If there are no keys to migrate in the source instance NOKEY is returned. 
+If there are no keys to migrate in the source instance `NOKEY` is returned. Because missing keys are possible in normal conditions, from expiry for example, -NOKEY isn't an error. +`NOKEY` isn't an error. ## Options @@ -50,5 +50,5 @@ NOKEY isn't an error. @return -@simple-string-reply: The command returns OK on success, or NOKEY if no keys were +@simple-string-reply: The command returns OK on success, or `NOKEY` if no keys were found in the source instance. From 0f0db1548bef3e005e775ead79b5b9c13be4baed Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 22 Jan 2016 16:08:55 +0100 Subject: [PATCH 0549/2314] Document that ZREM is the GEODEL people seek for. --- commands/geoadd.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/geoadd.md b/commands/geoadd.md index f95a1e4d33..e7d42b8f47 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -11,6 +11,8 @@ limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: The command will report an error when the user attempts to index coordinates outside the specified ranges. +**Note:** this command has no a symmetric **GEODEL** command simply because you can use `ZREM` in order to remove elements from the sorted set, and the Geo index structure is just a sorted set. + How does it work? 
--- From 9a899886297bac9da247a7fb095514e312c287a2 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sun, 24 Jan 2016 22:32:23 +0000 Subject: [PATCH 0550/2314] Added clinet cheprasov/php-redis-client for PHP --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 70a033544a..1957c620ec 100644 --- a/clients.json +++ b/clients.json @@ -1323,5 +1323,14 @@ "description": "High level Pythonic dict, set, and list like containers around Redis data types (Python 3 only)", "authors": ["brainix"], "active": true + }, + + { + "name": "cheprasov/php-redis-client", + "language": "PHP", + "repository": "https://github.com/cheprasov/php-redis-client", + "description": "Fast, fully-functional and user-friendly client, optimized for performance.", + "authors": ["cheprasov84"], + "active": true } ] From e4fd8c6a9e372066f819c5df86f109599c44b0ac Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sun, 24 Jan 2016 22:41:14 +0000 Subject: [PATCH 0551/2314] fix script debug params --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 4482e067a9..3d11290308 100644 --- a/commands.json +++ b/commands.json @@ -1949,7 +1949,7 @@ { "name": "mode", "type": "enum", - "enum": ["ON", "SYNC", "OFF"] + "enum": ["YES", "SYNC", "NO"] } ], "since": "3.2.0", From 06d7f4ecfd417b83e4527f37d90c0b2d27b35797 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sun, 24 Jan 2016 18:13:39 -0700 Subject: [PATCH 0552/2314] Mark geodel as known word --- wordlist | 1 + 1 file changed, 1 insertion(+) diff --git a/wordlist b/wordlist index 0f5e354a88..321f73d590 100644 --- a/wordlist +++ b/wordlist @@ -28,6 +28,7 @@ Facebook Fsyncing GCC GDB +GEODEL GETs GHz GPG From df95e78521559f66de4d25b9a69a2bbb035ba3e2 Mon Sep 17 00:00:00 2001 From: Gautier TANGUY Date: Thu, 28 Jan 2016 00:15:13 +0100 Subject: [PATCH 0553/2314] add redis-client library for Rust --- clients.json | 8 
++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 1957c620ec..f6a6304a68 100644 --- a/clients.json +++ b/clients.json @@ -923,6 +923,14 @@ "active": true }, + { + "name": "redis-client", + "language": "Rust", + "repository": "https://github.com/AsoSunag/redis-client", + "description": "A Redis client library for Rust.", + "authors": ["gtanguy"] + }, + { "name": "redic", "language": "Ruby", From 3b836180e5a3907983eb6e02a7c2f33638d36a82 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 29 Jan 2016 12:06:26 +0100 Subject: [PATCH 0554/2314] CLUSTER SLOTS doc updated with node IDs. --- commands/cluster-slots.md | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/commands/cluster-slots.md b/commands/cluster-slots.md index daa71933d1..39d745b6b7 100644 --- a/commands/cluster-slots.md +++ b/commands/cluster-slots.md @@ -28,11 +28,13 @@ If a cluster instance has non-contiguous slots (e.g. 1-400,900,1800-6000) then master and replica IP/Port results will be duplicated for each top-level slot range reply. +**Warning:** Newer versions of Redis Cluster will output, for each Redis instance, not just the IP and port, but also the node ID as third element of the array. In future versions there could be more elements describing the node better. In general a client implementation should just rely on the fact that certain parameters are at fixed positions as specified, but more parameters may follow and should be ignored. Similarly a client library should try if possible to cope with the fact that older versions may just have the IP and port parameter. + @return @array-reply: nested list of slot ranges with IP/Port mappings. -### Sample Output +### Sample Output (old version) ``` 127.0.0.1:7001> cluster slots 1) 1) (integer) 0 @@ -62,3 +64,32 @@ slot range reply. 
``` +### Sample Output (new version, includes IDs) +``` +127.0.0.1:30001> cluster slots +1) 1) (integer) 0 + 2) (integer) 5460 + 3) 1) "127.0.0.1" + 2) (integer) 30001 + 3) "09dbe9720cda62f7865eabc5fd8857c5d2678366" + 4) 1) "127.0.0.1" + 2) (integer) 30004 + 3) "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf" +2) 1) (integer) 5461 + 2) (integer) 10922 + 3) 1) "127.0.0.1" + 2) (integer) 30002 + 3) "c9d93d9f2c0c524ff34cc11838c2003d8c29e013" + 4) 1) "127.0.0.1" + 2) (integer) 30005 + 3) "faadb3eb99009de4ab72ad6b6ed87634c7ee410f" +3) 1) (integer) 10923 + 2) (integer) 16383 + 3) 1) "127.0.0.1" + 2) (integer) 30003 + 3) "044ec91f325b7595e76dbcb18cc688b6a5b434a1" + 4) 1) "127.0.0.1" + 2) (integer) 30006 + 3) "58e6e48d41228013e5d9c1c37c5060693925e97e" +``` + From c610a1fff4dd3c9164db47dad78134a556d1f07c Mon Sep 17 00:00:00 2001 From: Gautier Tanguy Date: Fri, 29 Jan 2016 16:03:38 +0100 Subject: [PATCH 0555/2314] remove wrong twitter account --- clients.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients.json b/clients.json index f6a6304a68..06f72adb54 100644 --- a/clients.json +++ b/clients.json @@ -927,8 +927,7 @@ "name": "redis-client", "language": "Rust", "repository": "https://github.com/AsoSunag/redis-client", - "description": "A Redis client library for Rust.", - "authors": ["gtanguy"] + "description": "A Redis client library for Rust." }, { From a549e1b337506e765ce62794dce5e02f7e77b049 Mon Sep 17 00:00:00 2001 From: Ke Date: Mon, 1 Feb 2016 08:55:21 +1300 Subject: [PATCH 0556/2314] Update clients.json Added Lasso Redis client. 
--- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 06f72adb54..ecc90fd992 100644 --- a/clients.json +++ b/clients.json @@ -1339,5 +1339,15 @@ "description": "Fast, fully-functional and user-friendly client, optimized for performance.", "authors": ["cheprasov84"], "active": true + }, + + { + "name": "lasso-redis", + "language": "Lasso", + "repository": "https://github.com/Zeroloop/lasso-redis", + "description": "High performance Redis client for Lasso, supports pub/sub and piping.", + "authors": ["Ke-"], + "active": true } + ] From 4870bc97a86a4e207415a5bca3f86f87e0f31fac Mon Sep 17 00:00:00 2001 From: Ke Date: Mon, 1 Feb 2016 10:17:34 +1300 Subject: [PATCH 0557/2314] Update clients.json Removed Github handle --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index ecc90fd992..b054f5d34d 100644 --- a/clients.json +++ b/clients.json @@ -1346,7 +1346,7 @@ "language": "Lasso", "repository": "https://github.com/Zeroloop/lasso-redis", "description": "High performance Redis client for Lasso, supports pub/sub and piping.", - "authors": ["Ke-"], + "authors": [], "active": true } From f4708121d37999f06ae450e660f67cf4d27c1e82 Mon Sep 17 00:00:00 2001 From: Lion Yang Date: Tue, 2 Feb 2016 23:35:09 +0800 Subject: [PATCH 0558/2314] fix typo in data-type-intro.md --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 9133d8987b..4c8336d9ff 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -711,7 +711,7 @@ sorted set elements, with their year of birth as "score". 
> zadd hackers 1940 "Alan Kay" (integer) 1 > zadd hackers 1957 "Sophie Wilson" - (integer 1) + (integer) 1 > zadd hackers 1953 "Richard Stallman" (integer) 1 > zadd hackers 1949 "Anita Borg" From f86c504c25c15e18b6aa4d295c45de8efe6127bd Mon Sep 17 00:00:00 2001 From: Jordan Evans Date: Wed, 3 Feb 2016 16:38:54 -0800 Subject: [PATCH 0559/2314] fix sentinel ascii art box alignment --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index ef1d377ecf..e2266586d3 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -177,7 +177,7 @@ format, this is what the different symbols means: +--------------------+ | This is a computer | | or VM that fails | - | independently. We | + | independently. We | | call it a "box" | +--------------------+ From 352b52a45047583185ef6fbd7ebd44a9aa78110f Mon Sep 17 00:00:00 2001 From: JXU Date: Thu, 4 Feb 2016 11:24:18 +0800 Subject: [PATCH 0560/2314] add a c++ redis client --- clients.json | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index b054f5d34d..bd1e976306 100644 --- a/clients.json +++ b/clients.json @@ -1348,6 +1348,14 @@ "description": "High performance Redis client for Lasso, supports pub/sub and piping.", "authors": [], "active": true - } + }, + { + "name": "c+redis+client", + "language": "C++", + "repository": "https://github.com/shawn246/redis_client", + "description": "A redis client based on hiredis, supports cluster/pipeline and is thread safe and includes two files only. The transaction is on the way:)", + "authors": ["shawn"], + "active": true + } ] From 8a851d3e79bff7eef8daf745a358699e232f3440 Mon Sep 17 00:00:00 2001 From: Curtis Forrester Date: Thu, 4 Feb 2016 11:21:35 -0500 Subject: [PATCH 0561/2314] Spelling of "usual" as "unusally" was usual. 
--- topics/data-types.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types.md b/topics/data-types.md index f67c225c6b..285488fbe2 100644 --- a/topics/data-types.md +++ b/topics/data-types.md @@ -81,7 +81,7 @@ You can do many interesting things using Redis Sets, for instance you can: * You can use Sets to extract elements at random using the [SPOP](/commands/spop) or [SRANDMEMBER](/commands/srandmember) commands. -As usually check the [full list of Set commands](/commands#set) for more information, or read the [introduction to Redis data types](/topics/data-types-intro). +As usual, check the [full list of Set commands](/commands#set) for more information, or read the [introduction to Redis data types](/topics/data-types-intro). Hashes From e7472597843a35a1ed5d898a68759fd7bd3c77bd Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 4 Feb 2016 13:59:54 -0800 Subject: [PATCH 0562/2314] Adds notification for `LREM` operations Dependent on the merging of https://github.com/antirez/redis/pull/3059 --- topics/notifications.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/notifications.md b/topics/notifications.md index 0a392f35e2..0884d720fe 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -110,6 +110,7 @@ Different commands generate different kind of events according to the following * `LPOP` generates an `lpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped. * `LINSERT` generates an `linsert` event. * `LSET` generates an `lset` event. +* `LREM` generates an `lrem` event, and additionally a `del` event if the resulting list is empty and the key is removed. * `LTRIM` generates an `ltrim` event, and additionally a `del` event if the resulting list is empty and the key is removed. * `RPOPLPUSH` and `BRPOPLPUSH` generate an `rpop` event and an `lpush` event. 
In both cases the order is guaranteed (the `lpush` event will always be delivered after the `rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. * `HSET`, `HSETNX` and `HMSET` all generate a single `hset` event. From 0915485a857206e672e0a3e7861be5035b427a99 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 4 Feb 2016 14:33:56 -0800 Subject: [PATCH 0563/2314] Corrects a typo --- commands.json | 2 +- commands/cluster-nodes.md | 2 +- commands/cluster-setslot.md | 8 ++++---- commands/hincrbyfloat.md | 2 +- commands/scan.md | 4 ++-- topics/pubsub.md | 2 +- topics/security.md | 2 +- topics/twitter-clone.md | 12 ++++++------ 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/commands.json b/commands.json index 3d11290308..55b56fc5ad 100644 --- a/commands.json +++ b/commands.json @@ -417,7 +417,7 @@ "group": "cluster" }, "CLUSTER SETSLOT": { - "summary": "Bind an hash slot to a specific node", + "summary": "Bind a hash slot to a specific node", "complexity": "O(1)", "arguments": [ { diff --git a/commands/cluster-nodes.md b/commands/cluster-nodes.md index fcc79a481f..87985efce8 100644 --- a/commands/cluster-nodes.md +++ b/commands/cluster-nodes.md @@ -45,7 +45,7 @@ The meaning of each filed is the following: 6. `pong-recv`: Milliseconds unix time the last pong was received. 7. `config-epoch`: The configuration epoch (or version) of the current node (or of the current master if the node is a slave). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoch wins. 8. `link-state`: The state of the link used for the node-to-node cluster bus. We use this link to communicate with the node. Can be `connected` or `disconnected`. -9. `slot`: An hash slot number or range. 
Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. +9. `slot`: A hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. Meaning of the flags (field number 3): diff --git a/commands/cluster-setslot.md b/commands/cluster-setslot.md index 6364f7e597..f819abc006 100644 --- a/commands/cluster-setslot.md +++ b/commands/cluster-setslot.md @@ -1,11 +1,11 @@ -`CLUSTER SETSLOT` is responsible of changing the state of an hash slot in the receiving node in different ways. It can, depending on the subcommand used: +`CLUSTER SETSLOT` is responsible of changing the state of a hash slot in the receiving node in different ways. It can, depending on the subcommand used: 1. `MIGRATING` subcommand: Set a hash slot in *migrating* state. 2. `IMPORTING` subcommand: Set a hash slot in *importing* state. 3. `STABLE` subcommand: Clear any importing / migrating state from hash slot. 4. `NODE` subcommand: Bind the hash slot to a different node. -The command with its set of subcommands is useful in order to start and end cluster live resharding operations, which are accomplished by setting an hash slot in migrating state in the source node, and importing state in the destination node. 
+The command with its set of subcommands is useful in order to start and end cluster live resharding operations, which are accomplished by setting a hash slot in migrating state in the source node, and importing state in the destination node. Each subcommand is documented below. At the end you'll find a description of how live resharding is performed using this command and other related commands. @@ -35,7 +35,7 @@ When a slot is set in importing state, the node changes behavior in the followin In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, sends `ASKING`, and immediately after sends the command. This way commands about non-existing keys in the old node or keys already migrated to the target node are executed in the target node, so that: -1. New keys are always created in the target node. During an hash slot migration we'll have to move only old keys, not new ones. +1. New keys are always created in the target node. During a hash slot migration we'll have to move only old keys, not new ones. 2. Commands about keys already migrated are correctly processed in the context of the node which is the target of the migration, the new hash slot owner, in order to guarantee consistency. 3. Without `ASKING` the behavior is the same as usually. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. @@ -56,7 +56,7 @@ command: 1. If the current hash slot owner is the node receiving the command, but for effect of the command the slot would be assigned to a different node, the command will return an error if there are still keys for that hash slot in the node receiving the command. 2. If the slot is in *migrating* state, the state gets cleared when the slot is assigned to another node. -3. 
If the slot was in *importing* state in the node receiving the command, and the command assigns the slot to this node (which happens in the target node at the end of the resharding of an hash slot from one node to another), the command has the following side effects: A) the *importing* state is cleared. B) If the node config epoch is not already the greatest of the cluster, it generates a new one and assigns the new config epoch to itself. This way its new hash slot ownership will win over any past configuration created by previous failovers or slot migrations. +3. If the slot was in *importing* state in the node receiving the command, and the command assigns the slot to this node (which happens in the target node at the end of the resharding of a hash slot from one node to another), the command has the following side effects: A) the *importing* state is cleared. B) If the node config epoch is not already the greatest of the cluster, it generates a new one and assigns the new config epoch to itself. This way its new hash slot ownership will win over any past configuration created by previous failovers or slot migrations. It is important to note that step 3 is the only time when a Redis Cluster node will create a new config epoch without agreement from other nodes. This only happens when a manual configuration is operated. However it is impossible that this creates a non-transient setup where two nodes have the same config epoch, since Redis Cluster uses a config epoch collision resolution algorithm. diff --git a/commands/hincrbyfloat.md b/commands/hincrbyfloat.md index 07bb16d848..f5cacc3a96 100644 --- a/commands/hincrbyfloat.md +++ b/commands/hincrbyfloat.md @@ -1,4 +1,4 @@ -Increment the specified `field` of an hash stored at `key`, and representing a +Increment the specified `field` of a hash stored at `key`, and representing a floating point number, by the specified `increment`. If the field does not exist, it is set to `0` before performing the operation. 
An error is returned if one of the following conditions occur: diff --git a/commands/scan.md b/commands/scan.md index ccbd0944d5..940e11811f 100644 --- a/commands/scan.md +++ b/commands/scan.md @@ -75,7 +75,7 @@ However there is a way for the user to tune the order of magnitude of the number While `SCAN` does not provide guarantees about the number of elements returned at every iteration, it is possible to empirically adjust the behavior of `SCAN` using the **COUNT** option. Basically with COUNT the user specified the *amount of work that should be done at every call in order to retrieve elements from the collection*. This is **just an hint** for the implementation, however generally speaking this is what you could expect most of the times from the implementation. * The default COUNT value is 10. -* When iterating the key space, or a Set, Hash or Sorted Set that is big enough to be represented by an hash table, assuming no **MATCH** option is used, the server will usually return *count* or a bit more than *count* elements per call. +* When iterating the key space, or a Set, Hash or Sorted Set that is big enough to be represented by a hash table, assuming no **MATCH** option is used, the server will usually return *count* or a bit more than *count* elements per call. * When iterating Sets encoded as intsets (small sets composed of just integers), or Hashes and Sorted Sets encoded as ziplists (small hashes and sets composed of small individual values), usually all the elements are returned in the first `SCAN` call regardless of the COUNT value. Important: **there is no need to use the same COUNT value** for every iteration. The caller is free to change the count from one iteration to the other as required, as long as the cursor passed in the next call is the one obtained in the previous call to the command. @@ -173,7 +173,7 @@ This is easy to see intuitively: if the collection grows there is more and more ## Additional examples -Iteration of an Hash value. 
+Iteration of a Hash value. ``` redis 127.0.0.1:6379> hmset hash name Jack age 33 diff --git a/topics/pubsub.md b/topics/pubsub.md index 3ed37ee138..f20284cf94 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -178,7 +178,7 @@ Because all the messages received contain the original subscription causing the message delivery (the channel in the case of message type, and the original pattern in the case of pmessage type) client libraries may bind the original subscription to callbacks (that can be anonymous -functions, blocks, function pointers), using an hash table. +functions, blocks, function pointers), using a hash table. When a message is received an O(1) lookup can be done in order to deliver the message to the registered callback. diff --git a/topics/security.md b/topics/security.md index 6d5bfd51fe..851017bf2d 100644 --- a/topics/security.md +++ b/topics/security.md @@ -121,7 +121,7 @@ the ability to insert data into Redis that triggers pathological (worst case) algorithm complexity on data structures implemented inside Redis internals. For instance an attacker could supply, via a web form, a set of strings that -is known to hash to the same bucket into an hash table in order to turn the +is known to hash to the same bucket into a hash table in order to turn the O(1) expected time (the average time) to the O(N) worst case, consuming more CPU than expected, and ultimately causing a Denial of Service. diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index 1378882066..792636ed8c 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -158,7 +158,7 @@ collection of fields associated with values: `HMSET` can be used to set fields in the hash, that can be retrieved with `HGET` later. It is possible to check if a field exists with `HEXISTS`, or -to increment an hash field with `HINCRBY` and so forth. +to increment a hash field with `HINCRBY` and so forth. Hashes are the ideal data structure to represent *objects*. 
For example we use Hashes in order to represent Users and Updates in our Twitter clone. @@ -183,11 +183,11 @@ Let's start with Users. We need to represent users, of course, with their userna INCR next_user_id => 1000 HMSET user:1000 username antirez password p1pp0 -*Note: you should use an hashed password in a real application, for simplicity +*Note: you should use a hashed password in a real application, for simplicity we store the password in clear text.* -We use the `next_user_id` key in order to always get an unique ID for every new user. Then we use this unique ID to name the key holding an Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. -Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add an user, we also populate the `users` key, which is an Hash, with the username as field, and its ID as value. +We use the `next_user_id` key in order to always get an unique ID for every new user. Then we use this unique ID to name the key holding a Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. +Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add an user, we also populate the `users` key, which is a Hash, with the username as field, and its ID as value. HSET users antirez 1000 @@ -235,7 +235,7 @@ an `auth` field in its Hash: HSET user:1000 auth fea5e81ac8ca77622bed1c2132a021f9 Moreover, we need a way to map authentication secrets to user IDs, so -we also take an `auths` key, which has as value an Hash type mapping +we also take an `auths` key, which has as value a Hash type mapping authentication secrets to user IDs. 
HSET auths fea5e81ac8ca77622bed1c2132a021f9 1000 @@ -339,7 +339,7 @@ Updates, also known as posts, are even simpler. In order to create a new post in INCR next_post_id => 10343 HMSET post:10343 user_id $owner_id time $time body "I'm having fun with Retwis" -As you can see each post is just represented by an Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally the body of the post, which is, the actual status message. +As you can see each post is just represented by a Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally the body of the post, which is, the actual status message. After we create a post and we obtain the post ID, we need to LPUSH the ID in the timeline of every user that is following the author of the post, and of course in the list of posts of the author itself (everybody is virtually following herself/himself). This is the file `post.php` that shows how this is performed: From 6c8eb3010f666bce7a5a4b9f12da1f53a2f6e93b Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 5 Feb 2016 14:17:28 +0200 Subject: [PATCH 0564/2314] Removes semicolon from Lua code While using a semicolon after each statement is legit in Lua, it isn't needed nor customary. --- commands/eval.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index dde33f3ced..50f1cb4ce0 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -491,7 +491,7 @@ or `SRANDMEMBER` commands inside your scripts freely at any place. 
In order to enable script effects replication, you need to issue the following Lua command before any write operated by the script: - redis.replicate_commands(); + redis.replicate_commands() The function returns true if the script effects replication was enabled, otherwise if the function was called after the script already called @@ -524,10 +524,10 @@ an error if called when script effects replication is disabled. The command can be called with four different arguments: - redis.set_repl(redis.REPL_ALL); -- Replicte to AOF and slaves. - redis.set_repl(redis.REPL_AOF); -- Replicte only to AOF. - redis.set_repl(redis.REPL_SLAVE); -- Replicte only to slaves. - redis.set_repl(redis.REPL_NONE); -- Don't replicate at all. + redis.set_repl(redis.REPL_ALL) -- Replicte to AOF and slaves. + redis.set_repl(redis.REPL_AOF) -- Replicte only to AOF. + redis.set_repl(redis.REPL_SLAVE) -- Replicte only to slaves. + redis.set_repl(redis.REPL_NONE) -- Don't replicate at all. By default the scripting engine is always set to `REPL_ALL`. By calling this function the user can switch on/off AOF and or slaves replication, and @@ -535,12 +535,12 @@ turn them back later at her/his wish. A simple example follows: - redis.replicate_commands(); -- Enable effects replication. - redis.call('set','A','1'); - redis.set_repl(redis.REPL_NONE); - redis.call('set','B','2'); - redis.set_repl(redis.REPL_ALL); - redis.call('set','C','3'); + redis.replicate_commands() -- Enable effects replication. + redis.call('set','A','1') + redis.set_repl(redis.REPL_NONE) + redis.call('set','B','2') + redis.set_repl(redis.REPL_ALL) + redis.call('set','C','3') After running the above script, the result is that only keys A and C will be created on slaves and AOF. 
From f99290e02a9af1abcc0edc40b0151c726583c314 Mon Sep 17 00:00:00 2001 From: Curtis Forrester Date: Fri, 5 Feb 2016 07:42:28 -0500 Subject: [PATCH 0565/2314] Spelling, rewording updates --- topics/data-types-intro.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 4c8336d9ff..cfb0e3d328 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -29,8 +29,8 @@ It's not always trivial to grasp how these data types work and what to use in order to solve a given problem from the [command reference](/commands), so this document is a crash course to Redis data types and their most common patterns. -For all the examples we'll use the `redis-cli` utility, that's a simple but -handy command line utility to issue commands against the Redis server. +For all the examples we'll use the `redis-cli` utility, a simple but +handy command-line utility, to issue commands against the Redis server. Redis keys --- @@ -41,11 +41,11 @@ The empty string is also a valid key. A few other rules about keys: -* Very long keys are not a good idea, for instance a key of 1024 bytes is a bad +* Very long keys are not a good idea. For instance a key of 1024 bytes is a bad idea not only memory-wise, but also because the lookup of the key in the dataset may require several costly key-comparisons. Even when the task at hand - is to match the existence of a large value, to resort to hashing it (for example - with SHA1) is a better idea, especially from the point of view of memory + is to match the existence of a large value, hashing it (for example + with SHA1) is a better idea, especially from the perspective of memory and bandwidth. * Very short keys are often not a good idea. There is little point in writing "u1000flw" as a key if you can instead write "user:1000:followers". The latter @@ -572,7 +572,7 @@ elements. 
As you can see they are not sorted -- Redis is free to return the elements in any order at every call, since there is no contract with the user about element ordering. -Redis has commands to test for membership. Does a given element exist? +Redis has commands to test for membership. For example, checking if an element exists: > sismember myset 3 (integer) 1 @@ -587,14 +587,14 @@ For instance we can easily use sets in order to implement tags. A simple way to model this problem is to have a set for every object we want to tag. The set contains the IDs of the tags associated with the object. -Imagine we want to tag news. -If our news ID 1000 is tagged with tags 1, 2, 5 and 77, we can have one set -associating our tag IDs with the news item: +One illustration is tagging news articles. +If article ID 1000 is tagged with tags 1, 2, 5 and 77, a set +can associate these tag IDs with the news item: > sadd news:1000:tags 1 2 5 77 (integer) 4 -However sometimes I may want to have the inverse relation as well: the list +We may also want to have the inverse relation as well: the list of all the news tagged with a given tag: > sadd tag:1:news 1000 @@ -626,7 +626,7 @@ sets. We can use: > sinter tag:1:news tag:2:news tag:10:news tag:27:news ... results here ... -Intersection is not the only operation performed, you can also perform +In addition to intersection you can also perform unions, difference, extract a random element, and so forth. The command to extract an element is called `SPOP`, and is handy to model @@ -672,7 +672,7 @@ Now I'm ready to provide the first player with five cards: One pair of jacks, not great... -Now it's a good time to introduce the set command that provides the number +This is a good time to introduce the set command that provides the number of elements inside a set. This is often called the *cardinality of a set* in the context of set theory, so the Redis command is called `SCARD`. 
From abd5cf4ca7cb3c514904cbe0cfb3f6806d404013 Mon Sep 17 00:00:00 2001 From: Phil Eaton Date: Sat, 6 Feb 2016 19:55:10 -0500 Subject: [PATCH 0566/2314] Update client.json with go-redis/redis client --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index b054f5d34d..3222f7e894 100644 --- a/clients.json +++ b/clients.json @@ -89,6 +89,15 @@ "active": true }, + { + "name": "go-redis/redis", + "language": "Go", + "repository": "https://github.com/go-redis/redis" + "description": "Redis client for Golang.", + "authors": [], + "active": true + }, + { "name": "Go-Redis", "language": "Go", From 7a74d10f91f3143fbc0447e2a0589aa444466a5e Mon Sep 17 00:00:00 2001 From: huangz1990 Date: Wed, 10 Feb 2016 16:41:29 +0800 Subject: [PATCH 0567/2314] Fix a bug in cluster document. --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index a9f47f03d5..b129cd17a7 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -94,7 +94,7 @@ so for example you may have a cluster with 3 nodes, where: * Node A contains hash slots from 0 to 5500. * Node B contains hash slots from 5501 to 11000. -* Node C contains hash slots from 11001 to 16384. +* Node C contains hash slots from 11001 to 16383. This allows to add and remove nodes in the cluster easily. For example if I want to add a new node D, I need to move some hash slot from nodes A, B, C From 24e1472f0915796669be26d2fdf8caf2ca3ed97b Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 10 Feb 2016 11:52:17 +0100 Subject: [PATCH 0568/2314] Update Redlock page with Martin Kleppmann analysis. --- topics/distlock.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/topics/distlock.md b/topics/distlock.md index f9d304794b..b3f05e7613 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -214,3 +214,8 @@ Want to help? 
If you are into distributed systems, it would be great to have your opinion / analysis. Also reference implementations in other languages could be great. Thanks in advance! + +Analysis of Redlock +--- + +1. Martin Kleppmann [analyzed Redlock here](http://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html). I disagree with the analysis and posted [my reply to his analysis here](http://antirez.com/news/101). From 71a109cc0558c24ff8156aae1ffe9a750eb23a4e Mon Sep 17 00:00:00 2001 From: "nikolay.bondarenko" Date: Fri, 12 Feb 2016 10:47:06 +0300 Subject: [PATCH 0569/2314] Fix hash encoding in object encoding command --- commands/object.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/object.md b/commands/object.md index 87aee77de6..7820db602f 100644 --- a/commands/object.md +++ b/commands/object.md @@ -29,8 +29,8 @@ Objects can be encoded in different ways: * Sets can be encoded as `intset` or `hashtable`. The `intset` is a special encoding used for small sets composed solely of integers. -* Hashes can be encoded as `zipmap` or `hashtable`. - The `zipmap` is a special encoding used for small hashes. +* Hashes can be encoded as `ziplist` or `hashtable`. + The `ziplist` is a special encoding used for small hashes. * Sorted Sets can be encoded as `ziplist` or `skiplist` format. 
As for the List type small sorted sets can be specially encoded using `ziplist`, while the `skiplist` encoding is the one that works with sorted From 183233644fb8b3106241adc87eeaa2c6a7d6a350 Mon Sep 17 00:00:00 2001 From: jeff martinez Date: Sat, 13 Feb 2016 12:25:27 -0800 Subject: [PATCH 0570/2314] fix a small grammar mistake in twitter clone tutorial --- topics/twitter-clone.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index 792636ed8c..acb050774d 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -369,7 +369,7 @@ After we create a post and we obtain the post ID, we need to LPUSH the ID in the The core of the function is the `foreach` loop. We use `ZRANGE` to get all the followers of the current user, then the loop will LPUSH the push the post in every follower timeline List. -Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an `LPUSH` to the `timeline` List. Let's face it, aren't you start thinking it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. +Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an `LPUSH` to the `timeline` List. Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. 
There is an interesting thing to notice in the code above: we use a new command called `LTRIM` after we perform the `LPUSH` operation in the global From b0883e5a125c8d7ec50e0ba247a65f4e4e6b2fb7 Mon Sep 17 00:00:00 2001 From: jeff martinez Date: Sat, 13 Feb 2016 12:30:50 -0800 Subject: [PATCH 0571/2314] fix a second grammar issue --- topics/twitter-clone.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index acb050774d..9dd37bc351 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -421,7 +421,7 @@ instead.* Following users --- -It is not hard, but we did not yet checked how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. We just need to `ZADD` calls: +It is not hard, but we did not yet check how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. 
We just need to `ZADD` calls: ZADD following:1000 5000 ZADD followers:5000 1000 From a210ad5e06e82300a1dc4e504f7386ab54f0087d Mon Sep 17 00:00:00 2001 From: Honza Dvorsky Date: Sat, 13 Feb 2016 23:47:19 +0100 Subject: [PATCH 0572/2314] Added Redbird - a new Swift client compatible with OS X and Linux --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index b054f5d34d..36860b88d9 100644 --- a/clients.json +++ b/clients.json @@ -1269,6 +1269,15 @@ "active": true }, + { + "name": "Redbird", + "language": "Swift", + "repository": "https://github.com/czechboy0/Redbird", + "description": "Pure-Swift implementation of a Redis client from the original protocol spec (OS X + Linux compatible)", + "authors": ["czechboy0"], + "active": true + }, + { "name": "Rackdis", "language": "Racket", From 3d1eb96fc712979fb3c87fb280835edfc4e223c3 Mon Sep 17 00:00:00 2001 From: Olivier Lemasle Date: Sun, 14 Feb 2016 16:53:31 +0100 Subject: [PATCH 0573/2314] Remove flag "active" to some inactive clients Definition of "active" is taken from http://redis.io/clients (activity on the official repository within the latest 6 months) --- clients.json | 51 +++++++++++++++++---------------------------------- 1 file changed, 17 insertions(+), 34 deletions(-) diff --git a/clients.json b/clients.json index b054f5d34d..2fac2ea60d 100644 --- a/clients.json +++ b/clients.json @@ -58,8 +58,7 @@ "language": "Erlang", "repository": "https://github.com/jeremyong/sharded_eredis", "description": "Wrapper around eredis providing process pools and consistent hashing.", - "authors": ["jeremyong"], - "active": true + "authors": ["jeremyong"] }, { @@ -67,8 +66,7 @@ "language": "Erlang", "repository": "git://git.tideland.biz/errc", "description": "A comfortable Redis client for Erlang/OTP support pooling, pub/sub and transactions.", - "authors": ["themue"], - "active": true + "authors": ["themue"] }, { @@ -85,8 +83,7 @@ "language": 
"Fancy", "repository": "https://github.com/bakkdoor/redis.fy", "description": "A Fancy Redis client library", - "authors": ["bakkdoor"], - "active": true + "authors": ["bakkdoor"] }, { @@ -94,8 +91,7 @@ "language": "Go", "repository": "https://github.com/alphazero/Go-Redis", "description": "Google Go Client and Connectors for Redis.", - "authors": ["SunOf27"], - "active": true + "authors": ["SunOf27"] }, { @@ -133,8 +129,7 @@ "language": "Go", "repository": "https://github.com/simonz05/godis", "description": "A Redis client for Go.", - "authors": [], - "active": true + "authors": [] }, { @@ -152,8 +147,7 @@ "language": "Go", "repository": "https://github.com/xuyu/goredis", "description": "A redis client for golang with full features", - "authors": ["xuyu"], - "active": true + "authors": ["xuyu"] }, { @@ -170,8 +164,7 @@ "language": "Go", "repository": "https://github.com/shipwire/redis", "description": "A Redis client focused on streaming, with support for a print-like API, pipelining, Pub/Sub, and connection pooling.", - "authors": ["stephensearles"], - "active": true + "authors": ["stephensearles"] }, { @@ -246,8 +239,7 @@ "language": "Java", "repository": "https://github.com/spullara/redis-protocol", "description": "Up to 2.6 compatible high-performance Java, Java w/Netty & Scala (finagle) client", - "authors": ["spullara"], - "active": true + "authors": ["spullara"] }, { @@ -272,8 +264,7 @@ "repository": "https://github.com/nrk/redis-lua", "description": "", "authors": ["JoL1hAHN"], - "recommended": true, - "active": true + "recommended": true }, { @@ -281,8 +272,7 @@ "language": "Lua", "repository": "https://github.com/agladysh/lua-hiredis", "description": "Lua bindings for the hiredis library", - "authors": ["agladysh"], - "active": true + "authors": ["agladysh"] }, { @@ -320,8 +310,7 @@ "language": "Perl", "url": 
"http://search.cpan.org/dist/Redis-hiredis/", "description": "Perl binding for the hiredis C client", - "authors": ["neophenix"], - "active": true + "authors": ["neophenix"] }, { @@ -349,8 +338,7 @@ "url": "http://search.cpan.org/dist/AnyEvent-Hiredis/", "repository": "https://github.com/wjackson/AnyEvent-Hiredis", "description": "Non-blocking client using the hiredis C library", - "authors": [], - "active": true + "authors": [] }, { @@ -359,8 +347,7 @@ "url": "http://search.cpan.org/dist/Mojo-Redis/", "repository": "https://github.com/marcusramberg/mojo-redis", "description": "asynchronous Redis client for Mojolicious", - "authors": ["und3f", "marcusramberg", "jhthorsen"], - "active": true + "authors": ["und3f", "marcusramberg", "jhthorsen"] }, { @@ -397,8 +384,7 @@ "url": "http://rediska.geometria-lab.net", "repository": "https://github.com/Shumkov/Rediska", "description": "", - "authors": ["shumkov"], - "active": true + "authors": ["shumkov"] }, { @@ -414,8 +400,7 @@ "language": "PHP", "repository": "https://github.com/jdp/redisent", "description": "", - "authors": ["justinpoliey"], - "active": true + "authors": ["justinpoliey"] }, { @@ -456,8 +441,7 @@ "language": "PHP", "repository": "https://github.com/swoole/redis-async", "description": "Asynchronous redis client library for PHP.", - "authors": ["matyhtf"], - "active": true + "authors": ["matyhtf"] }, { @@ -500,8 +484,7 @@ "language": "Python", "repository": "https://github.com/aallamaa/desir", "description": "", - "authors": ["aallamaa"], - "active": true + "authors": ["aallamaa"] }, { From dab803f3842a8b9cafde4a2fbccc7b79ebe67c17 Mon Sep 17 00:00:00 2001 From: Ismail Date: Tue, 16 Feb 2016 10:37:24 +0000 Subject: [PATCH 0574/2314] Fix minor typo --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 50f1cb4ce0..cb04ec10b1 100644 --- a/commands/eval.md 
+++ b/commands/eval.md @@ -9,7 +9,7 @@ It is just a Lua program that will run in the context of the Redis server. The second argument of `EVAL` is the number of arguments that follows the script (starting from the third argument) that represent Redis key names. -This arguments can be accessed by Lua using the `!KEYS` global variable in the +The arguments can be accessed by Lua using the `!KEYS` global variable in the form of a one-based array (so `KEYS[1]`, `KEYS[2]`, ...). All the additional arguments should not represent key names and can be accessed From 7eecedc5790a7bceb2393ab732bd790391d75d56 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 16 Feb 2016 13:23:59 +0100 Subject: [PATCH 0575/2314] MIGRATE doc updated with KEYS option. --- commands/migrate.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/commands/migrate.md b/commands/migrate.md index 388920841a..ece66a4c2c 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -41,12 +41,31 @@ If there are no keys to migrate in the source instance `NOKEY` is returned. Because missing keys are possible in normal conditions, from expiry for example, `NOKEY` isn't an error. +## Migrating multiple keys with a single command call + +Starting with Redis 3.0.6 `MIGRATE` supports a new bulk-migration mode that +uses pipelining in order to migrate multiple keys between instances without +incurring in the round trip time latency and other overheads that there are +when moving each key with a single `MIGRATE` call. + +In order to enable this form, the `KEYS` option is used, and the normal *key* +argument is set to an empty string. The actual key names will be provided +after the `KEYS` argument itself, like in the following example: + + MIGRATE 192.168.1.34 6379 "" 0 5000 KEYS key1 key2 key3 + +When this form is used the `NOKEY` status code is only returned when none +of the keys is preset in the instance, otherwise the command is executed, even if +just a single key exists. 
+ ## Options * `COPY` -- Do not remove the key from the local instance. * `REPLACE` -- Replace existing key on the remote instance. +* `KEYS` -- If the key argument is an empty string, the command will instead migrate all the keys that follow the `KEYS` option (see the above section for more info). `COPY` and `REPLACE` are available only in 3.0 and above. +`KEYS` is available starting with Redis 3.0.6. @return From 010ba8cb068fb39a1a25a5994233b89e3e64b859 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Tue, 16 Feb 2016 18:31:47 +0000 Subject: [PATCH 0576/2314] fix params for command SHUTDOWN --- commands.json | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/commands.json b/commands.json index 55b56fc5ad..b7e6bbc00a 100644 --- a/commands.json +++ b/commands.json @@ -2147,15 +2147,9 @@ "summary": "Synchronously save the dataset to disk and then shut down the server", "arguments": [ { - "name": "NOSAVE", + "name": "save-mode", "type": "enum", - "enum": ["NOSAVE"], - "optional": true - }, - { - "name": "SAVE", - "type": "enum", - "enum": ["SAVE"], + "enum": ["NOSAVE", "SAVE"], "optional": true } ], From b20d5d7b67383f1b75b8dc95ff5ad07d0eecaa0c Mon Sep 17 00:00:00 2001 From: randvis Date: Wed, 17 Feb 2016 16:11:11 +0800 Subject: [PATCH 0577/2314] Fix typo, change master to slave --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index e2266586d3..599a0ad09f 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -343,7 +343,7 @@ the network described above, but more likely possible with different layouts, or because of failures at the software layer), we have a similar issue as described in Example 2, with the difference that here we have no way to break the symmetry, since there is just a slave and master, so -the master can't stop accepting queries when is disconnected by its master, +the master can't stop accepting queries when is disconnected by its 
slave, otherwise the master would never be available during slave failures. So this is a valid setup but the setup in the Example 2 has advantages From e41507a98c1c91b140839896304ecfdeaf58adcf Mon Sep 17 00:00:00 2001 From: JXU Date: Fri, 19 Feb 2016 14:40:47 +0800 Subject: [PATCH 0578/2314] change the client.json file --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index bd1e976306..79d6c22ca9 100644 --- a/clients.json +++ b/clients.json @@ -1355,7 +1355,7 @@ "language": "C++", "repository": "https://github.com/shawn246/redis_client", "description": "A redis client based on hiredis, supports cluster/pipeline and is thread safe and includes two files only. The transaction is on the way:)", - "authors": ["shawn"], + "authors": [], "active": true } ] From 4f806013e03ac6c3140e43fe9e015692e04d3381 Mon Sep 17 00:00:00 2001 From: Bob HADDLETON Date: Sat, 20 Feb 2016 05:24:56 -0600 Subject: [PATCH 0579/2314] Added "sudo" to the "/etc/init.d/redis_6379 start" command in quickstart --- topics/quickstart.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/quickstart.md b/topics/quickstart.md index e15b033267..a95f588df6 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -194,7 +194,7 @@ Both the pid file path and the configuration file name depend on the port number You are done! 
Now you can try running your instance with: - /etc/init.d/redis_6379 start + sudo /etc/init.d/redis_6379 start Make sure that everything is working as expected: From 26c45d984c55535f5056e68ed1c429ad12c9cd78 Mon Sep 17 00:00:00 2001 From: Bob HADDLETON Date: Sat, 20 Feb 2016 05:24:56 -0600 Subject: [PATCH 0580/2314] Added "sudo" to the "make install" and "/etc/init.d/redis_6379 start" commands in quickstart --- topics/quickstart.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/quickstart.md b/topics/quickstart.md index e15b033267..166abae151 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -41,7 +41,7 @@ It is a good idea to copy both the Redis server and the command line interface i * sudo cp src/redis-server /usr/local/bin/ * sudo cp src/redis-cli /usr/local/bin/ -Or just using `make install`. +Or just using `sudo make install`. In the following documentation we assume that /usr/local/bin is in your PATH environment variable so that you can execute both the binaries without specifying the full path. @@ -194,7 +194,7 @@ Both the pid file path and the configuration file name depend on the port number You are done! 
Now you can try running your instance with: - /etc/init.d/redis_6379 start + sudo /etc/init.d/redis_6379 start Make sure that everything is working as expected: From 5b03e4441ddc276a8397dcc801b23e49428ae9da Mon Sep 17 00:00:00 2001 From: Ricardo Borelli Date: Tue, 23 Feb 2016 14:19:19 -0300 Subject: [PATCH 0581/2314] Add Swift-Redis client --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 79d6c22ca9..166cc73c52 100644 --- a/clients.json +++ b/clients.json @@ -1269,6 +1269,15 @@ "active": true }, + { + "name": "Swift-Redis", + "language": "Swift", + "repository": "https://github.com/rabc/Swift-Redis", + "description": "Redis client for (pure) Swift", + "authors": ["rabc"], + "active": true + }, + { "name": "Rackdis", "language": "Racket", From 366d5d471c814e6625089cbe2c36a6f7c96cecac Mon Sep 17 00:00:00 2001 From: Dan Bravender Date: Wed, 24 Feb 2016 10:41:03 -0500 Subject: [PATCH 0582/2314] maxmemory should be maxclients here --- topics/clients.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/clients.md b/topics/clients.md index 1c1f0d7227..a6bcfa5b81 100644 --- a/topics/clients.md +++ b/topics/clients.md @@ -50,7 +50,7 @@ In Redis 2.4 there was an hard-coded limit about the maximum number of clients that was possible to handle simultaneously. In Redis 2.6 this limit is dynamic: by default is set to 10000 clients, unless -otherwise stated by the `maxmemory` directive in Redis.conf. +otherwise stated by the `maxclients` directive in Redis.conf. 
However Redis checks with the kernel what is the maximum number of file descriptors that we are able to open (the *soft limit* is checked), if the From 096c720c1ef2bf97864f907a7dc06640f212f315 Mon Sep 17 00:00:00 2001 From: danieleteti Date: Wed, 13 Jan 2016 12:03:02 +0100 Subject: [PATCH 0583/2314] Added Delphi Redis Client --- clients.json | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/clients.json b/clients.json index e167df994e..5296683f35 100644 --- a/clients.json +++ b/clients.json @@ -1172,7 +1172,7 @@ "authors": ["ik5"] }, - { + { "name": "Redi.sh", "language": "Bash", "repository": "https://github.com/crypt1d/redi.sh", @@ -1277,7 +1277,7 @@ "description": "RedisKit is a asynchronious client framework for Redis server, written in Objective-C", "authors": ["dizzus"] }, - + { "name": "qredisclient", "language": "C++", @@ -1286,7 +1286,7 @@ "authors": ["u_glide"], "active": true }, - + { "name": "xredis", "language": "C++", @@ -1305,7 +1305,7 @@ "authors": ["simon_ninon"], "active": true }, - + { "name": "mruby-redis", "language": "mruby", @@ -1332,7 +1332,7 @@ "authors": ["cheprasov84"], "active": true }, - + { "name": "lasso-redis", "language": "Lasso", @@ -1341,7 +1341,7 @@ "authors": [], "active": true }, - + { "name": "c+redis+client", "language": "C++", @@ -1349,5 +1349,14 @@ "description": "A redis client based on hiredis, supports cluster/pipeline and is thread safe and includes two files only. 
The transaction is on the way:)", "authors": [], "active": true + }, + + { + "name": "delphiredisclient", + "language": "Delphi", + "repository": "https://github.com/danieleteti/delphiredisclient", + "description": "A Delphi Redis Client", + "authors": ["danieleteti"], + "active": true } ] From 287ec924aa20dc58f87709ca1472357c4a03fc0a Mon Sep 17 00:00:00 2001 From: Phil Eaton Date: Sun, 28 Feb 2016 10:56:28 -0500 Subject: [PATCH 0584/2314] updates clients.json for correct json formatting and additional description text --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 3222f7e894..05960d1015 100644 --- a/clients.json +++ b/clients.json @@ -92,8 +92,8 @@ { "name": "go-redis/redis", "language": "Go", - "repository": "https://github.com/go-redis/redis" - "description": "Redis client for Golang.", + "repository": "https://github.com/go-redis/redis", + "description": "Redis client for Golang supporting Redis Sentinel and Redis Cluster out of the box.", "authors": [], "active": true }, From 1f63a396589d727ca23d191374ab7f6cb86cdb99 Mon Sep 17 00:00:00 2001 From: randvis Date: Tue, 1 Mar 2016 20:52:12 +0800 Subject: [PATCH 0585/2314] Refine the sentence, fix #685 --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 599a0ad09f..b276192cd8 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -343,7 +343,7 @@ the network described above, but more likely possible with different layouts, or because of failures at the software layer), we have a similar issue as described in Example 2, with the difference that here we have no way to break the symmetry, since there is just a slave and master, so -the master can't stop accepting queries when is disconnected by its slave, +the master can't stop accepting queries when it is disconnected from its slave, otherwise the master would never 
be available during slave failures. So this is a valid setup but the setup in the Example 2 has advantages From e4755549f7243df96234960dd7b3fee3c3b39e11 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Tue, 1 Mar 2016 19:17:41 +0000 Subject: [PATCH 0586/2314] Added sort param for GEO commands --- commands.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/commands.json b/commands.json index b7e6bbc00a..0436ccb2cb 100644 --- a/commands.json +++ b/commands.json @@ -846,6 +846,11 @@ "name": "count", "type": "integer", "optional": true + }, + { + "name": "sort", + "type": "enum", + "enum": ["ASC", "DESC"] } ], "group": "geo" @@ -894,6 +899,11 @@ "name": "count", "type": "integer", "optional": true + }, + { + "name": "sort", + "type": "enum", + "enum": ["ASC", "DESC"] } ], "group": "geo" From ea70087b3270094ae20f369b0d94bc27e2f7f94b Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Tue, 1 Mar 2016 19:31:36 +0000 Subject: [PATCH 0587/2314] Added sort param for GEO commands --- commands.json | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index 0436ccb2cb..7192b9a668 100644 --- a/commands.json +++ b/commands.json @@ -850,7 +850,8 @@ { "name": "sort", "type": "enum", - "enum": ["ASC", "DESC"] + "enum": ["ASC", "DESC"], + "optional": true } ], "group": "geo" @@ -903,7 +904,8 @@ { "name": "sort", "type": "enum", - "enum": ["ASC", "DESC"] + "enum": ["ASC", "DESC"], + "optional": true } ], "group": "geo" From ff567ed583158bbcdb49ef1dfa1df31c1c7a9c6e Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Tue, 1 Mar 2016 19:38:25 +0000 Subject: [PATCH 0588/2314] Added sort param for GEO commands --- commands.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands.json b/commands.json index 7192b9a668..83ad0b55c3 100644 --- a/commands.json +++ b/commands.json @@ -848,7 +848,7 @@ "optional": true }, { - "name": "sort", + "name": "order", "type": "enum", "enum": 
["ASC", "DESC"], "optional": true @@ -902,7 +902,7 @@ "optional": true }, { - "name": "sort", + "name": "order", "type": "enum", "enum": ["ASC", "DESC"], "optional": true From bb35bb855fbc2c5d4b6f3c931ae4bcf9078052ce Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 4 Mar 2016 16:45:27 +0200 Subject: [PATCH 0589/2314] Replaces "informations" with "information" --- topics/indexes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 6fc433b3ce..0b58ef1f7c 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -87,7 +87,7 @@ could do: This time the value associated with the score in the sorted set is the ID of the object. So once I query the index with `ZRANGEBYSCORE` I'll -also have to retrieve the informations I need with `HGETALL` or similar +also have to retrieve the information I need with `HGETALL` or similar commands. The obvious advantage is that objects can change without touching the index, as long as we don't change the indexed field. @@ -327,7 +327,7 @@ Basically we add another field that we'll extract and use only for visualization. Ranges will always be computed using the normalized strings instead. This is a common trick which has multiple applications. 
-Adding auxiliary informations in the index +Adding auxiliary information in the index --- When using a sorted set in a direct way, we have two different attributes From 3377e8cb8b67ccff88de67765a0d2380fb228e84 Mon Sep 17 00:00:00 2001 From: Ricardo Borelli Date: Tue, 8 Mar 2016 10:56:43 -0300 Subject: [PATCH 0590/2314] Change Swift-Redis client name to ZRedis --- clients.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 7607557fd2..94bfdefbc8 100644 --- a/clients.json +++ b/clients.json @@ -1239,9 +1239,9 @@ }, { - "name": "Swift-Redis", + "name": "ZRedis", "language": "Swift", - "repository": "https://github.com/rabc/Swift-Redis", + "repository": "https://github.com/rabc/ZRedis", "description": "Redis client for (pure) Swift", "authors": ["rabc"], "active": true From 9175ec5154ca98d60e32c6f26e95ade110a827c3 Mon Sep 17 00:00:00 2001 From: Dan Belling Date: Wed, 9 Mar 2016 20:11:49 -0600 Subject: [PATCH 0591/2314] Typo --- topics/latency-monitor.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/latency-monitor.md b/topics/latency-monitor.md index a2dc6d0fc5..4e6a767e5d 100644 --- a/topics/latency-monitor.md +++ b/topics/latency-monitor.md @@ -20,7 +20,7 @@ thread must be able to perform certain tasks incrementally, like for example keys expiration, in a way that does not impact the other clients that are served. -For all this reasons, Redis 2.8.13 introduced a new feature called +For all these reasons, Redis 2.8.13 introduced a new feature called **Latency Monitoring**, that helps the user to check and troubleshoot possible latency problems. 
Latency monitoring is composed of the following conceptual parts: From 236098107790893075f124fd87af1a7e95d824f4 Mon Sep 17 00:00:00 2001 From: Luis Ashurei Date: Mon, 14 Mar 2016 17:20:27 +0800 Subject: [PATCH 0592/2314] Fix typo `preset` -> `present` --- commands/migrate.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/migrate.md b/commands/migrate.md index ece66a4c2c..6c082005bb 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -55,7 +55,7 @@ after the `KEYS` argument itself, like in the following example: MIGRATE 192.168.1.34 6379 "" 0 5000 KEYS key1 key2 key3 When this form is used the `NOKEY` status code is only returned when none -of the keys is preset in the instance, otherwise the command is executed, even if +of the keys is present in the instance, otherwise the command is executed, even if just a single key exists. ## Options From f15211472ee2aeb7e52e9490bcbc0655d159925f Mon Sep 17 00:00:00 2001 From: Jason Punyon Date: Wed, 30 Mar 2016 14:45:11 -0400 Subject: [PATCH 0593/2314] Adding Rol to tools.json --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index a4f2fd3561..57124e0320 100644 --- a/tools.json +++ b/tools.json @@ -555,5 +555,12 @@ "repository": "https://github.com/pkulchenko/ZeroBranePackage", "description": "Enables support for the Redis Lua API, provides remote script execution and debugging", "authors": ["zerobrane"] + }, + { + "name": "Rol", + "language": "C#", + "repository": "https://github.com/jasonpunyon/rol", + "description": "A library that makes storing and working with data in redis as easy as declaring an interface.", + "authors": ["Jason Punyon"] } ] From 9dfe170cb18b5183a388defd1008f64a7fcddf59 Mon Sep 17 00:00:00 2001 From: Guillaume F Date: Thu, 31 Mar 2016 04:54:59 +0200 Subject: [PATCH 0594/2314] C client library add. Eredis. 
--- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 94bfdefbc8..e7a9f04ea9 100644 --- a/clients.json +++ b/clients.json @@ -1353,5 +1353,15 @@ "description": "A Delphi Redis Client", "authors": ["danieleteti"], "active": true + }, + + { + "name": "eredis", + "language": "C", + "repository": "https://github.com/EulerianTechnologies/eredis", + "description": "Fast and light Redis C client library extending Hiredis: thread-safe, write replication, auto-reconnect, sync pool, async libev", + "authors": ["EulerianTechnologies","guillaumef"], + "recommended": true, + "active": true } ] From c72d450d77841672b85c5224ec9c98b8aabb43a7 Mon Sep 17 00:00:00 2001 From: Guillaume F Date: Thu, 31 Mar 2016 04:55:58 +0200 Subject: [PATCH 0595/2314] C client library add. Eredis. --- clients.json | 1 - 1 file changed, 1 deletion(-) diff --git a/clients.json b/clients.json index e7a9f04ea9..e1691dd6a3 100644 --- a/clients.json +++ b/clients.json @@ -1361,7 +1361,6 @@ "repository": "https://github.com/EulerianTechnologies/eredis", "description": "Fast and light Redis C client library extending Hiredis: thread-safe, write replication, auto-reconnect, sync pool, async libev", "authors": ["EulerianTechnologies","guillaumef"], - "recommended": true, "active": true } ] From a255a7981e158d7e6df0bdd3f9feae3b55f3e8d3 Mon Sep 17 00:00:00 2001 From: Guillaume F Date: Thu, 31 Mar 2016 11:09:03 +0200 Subject: [PATCH 0596/2314] C client library add. Eredis. Just have my company twitter account. Thanks. 
--- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index e1691dd6a3..051e809ad3 100644 --- a/clients.json +++ b/clients.json @@ -1360,7 +1360,7 @@ "language": "C", "repository": "https://github.com/EulerianTechnologies/eredis", "description": "Fast and light Redis C client library extending Hiredis: thread-safe, write replication, auto-reconnect, sync pool, async libev", - "authors": ["EulerianTechnologies","guillaumef"], + "authors": ["EulerianTech"], "active": true } ] From 41c4e25dbee57291aa35c53a58af06b9777e0553 Mon Sep 17 00:00:00 2001 From: Anton Kalyaev Date: Tue, 22 Mar 2016 21:38:35 +0300 Subject: [PATCH 0597/2314] add hierdis client --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 051e809ad3..9649c87e2c 100644 --- a/clients.json +++ b/clients.json @@ -1362,5 +1362,13 @@ "description": "Fast and light Redis C client library extending Hiredis: thread-safe, write replication, auto-reconnect, sync pool, async libev", "authors": ["EulerianTech"], "active": true + }, + + { + "name": "Hierdis", + "language": "Erlang", + "repository": "https://github.com/funbox/hierdis", + "description": "High-performance Erlang client for the Redis key-value store (NIF wrapping the hiredis C client).", + "authors": ["funbox_team"] } ] From a0bf4803f48ee6edb0965d3b7ea6ae571f6ad3b2 Mon Sep 17 00:00:00 2001 From: Jason Punyon Date: Thu, 31 Mar 2016 09:35:32 -0400 Subject: [PATCH 0598/2314] Update tools.json --- tools.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools.json b/tools.json index 57124e0320..2c238f5c5d 100644 --- a/tools.json +++ b/tools.json @@ -561,6 +561,6 @@ "language": "C#", "repository": "https://github.com/jasonpunyon/rol", "description": "A library that makes storing and working with data in redis as easy as declaring an interface.", - "authors": ["Jason Punyon"] + "authors": 
["jasonpunyon"] } ] From 4ecb5d53269603f6e4b5d0b8d188080a55314597 Mon Sep 17 00:00:00 2001 From: Lee Gould Date: Fri, 1 Apr 2016 10:51:30 +0100 Subject: [PATCH 0599/2314] Add Redis Explorer client --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 2c238f5c5d..2c7b9cd81c 100644 --- a/tools.json +++ b/tools.json @@ -562,5 +562,12 @@ "repository": "https://github.com/jasonpunyon/rol", "description": "A library that makes storing and working with data in redis as easy as declaring an interface.", "authors": ["jasonpunyon"] + }, + { + "name": "Redis Explorer", + "language": "C#", + "repository": "https://github.com/leegould/RedisExplorer", + "description": "Windows desktop GUI client", + "authors": ["leegould"] } ] From cf5652c546522dddc3a5b29a95b615c06b729b78 Mon Sep 17 00:00:00 2001 From: Ricardo Borelli Date: Tue, 5 Apr 2016 15:39:09 -0300 Subject: [PATCH 0600/2314] Update clients.json The library have been moved to Zewo's organization. 
--- clients.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index 9649c87e2c..904d5c4c72 100644 --- a/clients.json +++ b/clients.json @@ -1239,10 +1239,10 @@ }, { - "name": "ZRedis", + "name": "Redis", "language": "Swift", - "repository": "https://github.com/rabc/ZRedis", - "description": "Redis client for (pure) Swift", + "repository": "https://github.com/Zewo/Redis", + "description": "Redis client for Swift", "authors": ["rabc"], "active": true }, From 30ebfb60dba50089e898b5c87370a2ced53482a3 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Wed, 6 Apr 2016 13:44:58 -0300 Subject: [PATCH 0601/2314] Update clients.json --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 904d5c4c72..ce85e3af8e 100644 --- a/clients.json +++ b/clients.json @@ -1370,5 +1370,14 @@ "repository": "https://github.com/funbox/hierdis", "description": "High-performance Erlang client for the Redis key-value store (NIF wrapping the hiredis C client).", "authors": ["funbox_team"] + }, + + { + "name": "yoredis", + "language": "Node.js", + "repository": "https://github.com/djanowski/yoredis", + "description": "A minimalistic Redis client using modern Node.js.", + "authors": ["djanowski"], + "active": true } ] From ec6605aa5b246d102037a7027491c4baa8d99c0e Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 8 Apr 2016 16:11:30 +0200 Subject: [PATCH 0602/2314] redis-cli doc, finally. 
--- topics/rediscli.md | 753 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 753 insertions(+) create mode 100644 topics/rediscli.md diff --git a/topics/rediscli.md b/topics/rediscli.md new file mode 100644 index 0000000000..4794494b03 --- /dev/null +++ b/topics/rediscli.md @@ -0,0 +1,753 @@ +redis-cli, the Redis command line interface +=== + +`redis-cli` is the Redis command line interface, a simple program that allows +to send commands to Redis, and read the replies sent by the server, directly from the terminal. + +It has two main modes: an interactive mode where there is a REPL (Read +Eval Print Loop) where the user types commands and get replies. And another +mode where the command is sent as arguments of `redis-cli`, executed, and +printed on the standard output. + +In interactive mode `redis-cli` has basic line editing capabilities to provide +a good typing experience. + +However `redis-cli` is not just that. There are options you can use to launch +the program in order to put it into special modes, so that `redis-cli` can +do definitely more complex tasks, like simulate a slave and print the +replication stream it receives from the master, check the latency of a Redis +server and show statistics or even an ASCII-art spectrogram of latency +samples and frequencies, and many other things. + +This guide will cover the different aspects of `redis-cli`, starting from the +simplest and ending with the more advanced ones. + +If you are going to use Redis extensively, or if you already do, chances are +you happen to use `redis-cli` a lot. Spending some time to familiarize with +it is likely a very good idea, you'll see that you'll work more effectively +with Redis once you know all the tricks of its command line interface. 
+ +Command line usage +=== + +To just run a command and have its reply printed on the standard output, +it is as simple as typing the command to execute as separated arguments +of `redis-cli`: + + $ redis-cli incr mycounter + (integer) 7 + +The reply of the command is "7". Since Redis replies are typed (they can be +strings, arrays, integers, NULL, errors and so forth), you see the type +of the reply between brackets. However that would be not exactly idea when +the output of `redis-cli` must be used as input of another command, or when +we want to redirect it into a file. + +Actually `redis-cli` only shows additional informations which improve human +readability when it detects the standard output is a tty (a terminal basically). +Otherwise it will auto-enable the *raw output mode*, like in the following +example: + + $ redis-cli incr mycounter > /tmp/output.txt + $ cat /tmp/output.txt + 8 + +This time `(integer)` was omitted from the output since the cli detected +the output was no longer written to the terminal. You can force raw output +even on the terminal with the `--raw` option: + + $ redis-cli --raw incr mycounter + 9 + +Similarly you can force human readable output when writing to a file or in +pipe to other commands by using `--no-raw`. + +## Host, port, password and database + +By default `redis-cli` connects to the server at 127.0.0.1 port 6379. +As you can guess, you can easily change this using command line options. +To specify a different host name, or IP address, use `-h`. In order to +set a different port, use `-p`. 
+ + $ redis-cli -h redis15.localnet.org -p 6390 ping + PONG + +If your instance is password protected, the `-a ` option +will authenticate for you without the need of typing an explicit +`AUTH` command: + + $ redis-cli -a myUnguessablePazzzzzword123 ping + PONG + +Finally it's possible to send a command that operates on a different +database number (the default is to use zero), using the `-n ` +option: + + $ redis-cli flushall + OK + $ redis-cli -n 1 incr a + (integer) 1 + $ redis-cli -n 1 incr a + (integer) 2 + $ redis-cli -n 2 incr a + (integer) 1 + +## Getting input from other programs + +There are two ways you can use `redis-cli` in order to get the input +from other commands (from the standard input, basically). +One is to use as last argument the payload we read from *stdin*. +For example in order to set a Redis key to the content of the +file `/etc/services` if my computer, I can use the `-x` option: + + $ redis-cli -x set foo < /etc/services + OK + $ redis-cli getrange foo 0 50 + "#\n# Network services, Internet style\n#\n# Note that " + +As you can see in the first line of the above session, the last argument +of the `SET` command was not specified. The arguments are just `SET foo` +without the actual value I want my key to be set. + +Instead the `-x` option was specified, and a file was redirected to the +cli standard input. So the input was read, and was used as the final +argument for the command. This is useful for scripting. + +A different approach is to feed `redis-cli` a sequence of commands +written in a text file: + + $ cat /tmp/commands.txt + set foo 100 + incr foo + append foo xxx + get foo + $ cat /tmp/commands.txt | redis-cli + OK + (integer) 101 + (integer) 6 + "101xxx" + +All the commands into `commands.txt` are executed one after the other +by redis-cli as if they were typed by the user interactive. 
Strings can +be quoted inside the file needed, so that it's possible to have single +arguments with spaces or newlines or other special chars inside: + + $ cat /tmp/commands.txt + set foo "This is a single argument" + strlen foo + $ cat /tmp/commands.txt | redis-cli + OK + (integer) 25 + +## Continuously run the same command + +It is possible to execute the same command a specified number of times +with a user selected pause between the executions. This is useful in +different contexts, for example when we want to continuously monitor some +key content or `INFO` field output, or when we want to simulate some +recurring write event (like pushing a new item into a list every 5 seconds). + +This feature is controlled by two options: `-r ` and `-i `. +The first states how many times to run a command, the second configures +the delay between the different command calls, in seconds (with the ability +to specify decimal numbers like 0.1 in order to mean 100 milliseconds). + +By default the interval (or delay) is set to 0, so commands are just executed +ASAP: + + $ redis-cli -r 5 incr foo + (integer) 1 + (integer) 2 + (integer) 3 + (integer) 4 + (integer) 5 + +To run the same command forever, use `-1` as count. +So for example in order to monitor over time the RSS memory size it's possible +to use a command like the following: + + $ redis-cli -r -1 -i 1 INFO | grep rss_human + used_memory_rss_human:1.38M + used_memory_rss_human:1.38M + used_memory_rss_human:1.38M + ... a new line will be printed each second ... + +## Mass insertion of data using `redis-cli` + +Mass insert using `redis-cli` is covered in a separated page since it's a +worthwhile topic itself. Please refer to our [mass insertion guide](/topics/mass-insert). + +## CSV output + +Sometimes you may want to use `redis-cli` in order to quickly export data +from Redis to an external program. 
This can be accomplished using the CSV +output feature: + + $ redis-cli lpush mylist a b c d + (integer) 4 + $ redis-cli --csv lrange mylist 0 -1 + "d","c","b","a" + +Currently it's not possible to export the whole DB like that, but only +to run single commands with CSV output. + +## Running Lua scripts + +The `redis-cli` has extensive support for using the new Lua debugging +facility of Lua scripting, available starting with Redis 3.2. However +for this feature, please refer to the +[Redis Lua debugger documentation](/topics/ldb). + +However, even without using the debugger, you can use `redis-cli` to +run scripts from a file in a way more comfortable compared to typing +the script interactively into the shell or as an argument. + + $ cat /tmp/script.lua + return redis.call('set',KEYS[1],ARGV[1]) + $ redis-cli --eval /tmp/script.lua foo , bar + OK + +The Redis `EVAL` command takes the list of keys the script uses, and the +other non key arguments, as different arrays. When calling `EVAL` you +provide the number of keys as a number. However with `redis-cli` by using +the `--eval` option above, there is no need to specify the number of keys +explicitly. Instead it uses the convention of separating keys and arguments +with a comma. This is why in the above call you see `foo , bar` as arguments. + +So `foo` will populate the `KEYS` array, and `bar` the `ARGV` array. + +The `--eval` option ca be useful in order to write simple scripts. For more +complex work, using the Lua debugger is definitely more comfortable. It's +possible to mix the two approaches, since the debugger also uses executing +scripts from an external file. + +Interactive mode +=== + +So far we explored how to use the Redis CLI as a command line program. +This is very useful for scripts and certain types of testing, however most +people will spend the majority of time in `redis-cli` using its interactive +mode. + +In interactive mode the user types Redis commands into a prompt. 
The command +is sent to the server, processed, and the reply is parsed back and rendered +in a simpler to read form. + +In order to run the CLI in interactive mode, there is nothing special to do: +just lunch it without arguments and you are in: + + $ redis-cli + 127.0.0.1:6379> ping + PONG + +The string `127.0.0.1:6379>` is the prompt. It reminds you that you are +connected to a given Redis instance. + +The prompt changes as the server you are connected to, changes, or when you +are operating on a database different than the database number zero: + + 127.0.0.1:6379> select 2 + OK + 127.0.0.1:6379[2]> dbsize + (integer) 1 + 127.0.0.1:6379[2]> select 0 + OK + 127.0.0.1:6379> dbsize + (integer) 503 + +## Handling connections and reconnections + +Using the `connect` command in interactive mode it's possible to connect +to a different instance, by specifying the *hostname* and *port* we want +to connect to: + + 127.0.0.1:6379> connect metal 6379 + metal:6379> ping + PONG + +As you can see the prompt changes accordingly. If the user attempts to connect +to an instance that is unreachable, the `redis-cli` goes into disconnected +more, and attempts to reconnect at each new command: + + 127.0.0.1:6379> connect 127.0.0.1 9999 + Could not connect to Redis at 127.0.0.1:9999: Connection refused + not connected> ping + Could not connect to Redis at 127.0.0.1:9999: Connection refused + not connected> ping + Could not connect to Redis at 127.0.0.1:9999: Connection refused + +In general after a disconnection is detected, the CLI always attempts to +reconnect transparently: if the attempt fails, it shows the error and +enters the disconnected state. 
The following is an example of disconnection +and reconnection: + + 127.0.0.1:6379> debug restart + Could not connect to Redis at 127.0.0.1:6379: Connection refused + not connected> ping + PONG + 127.0.0.1:6379> (now we are connected again) + +When a reconnection is performed, `redis-cli` automatically re-select the +latest database number selected. However all the other state about the +connection is lost, like for example, the state of a transaction if we +were in the middle of it: + + $ redis-cli + 127.0.0.1:6379> multi + OK + 127.0.0.1:6379> ping + QUEUED + + ( here the server is manually restarted ) + + 127.0.0.1:6379> exec + (error) ERR EXEC without MULTI + +This is usually not an issue when using the CLI in interactive mode for +testing, but you want to make sure to know about this limitation. + +## Editing, history and completion + +Because `redis-cli` uses the +[linenoise line editing library](http://github.com/antirez/linenoise), it +always has line editing capabilities, without depending on `libreadline` or other +optional libraries. + +You can access an history of commands executed, in order to avoid retyping +them again and again, by pressing the arrow keys (up and down). +The history is preserved between restarts of the CLI, in a file called +`.rediscli_history` inside the user home directory, as specified +by the `HOME` environment variable. + +The CLI is also able to perform command names completion by pressing the TAB key, +like in the following example: + + 127.0.0.1:6379> Z + 127.0.0.1:6379> ZADD + 127.0.0.1:6379> ZCARD + +## Running the same command N times + +It's possible to run the same command multiple times by prefixing the command +name by a number: + + 127.0.0.1:6379> 5 incr mycounter + (integer) 1 + (integer) 2 + (integer) 3 + (integer) 4 + (integer) 5 + +## Showing help about Redis commands + +Redis has a number of commands and sometimes, as you test things, you may +not remember the exact order of arguments. 
`redis-cli` provides online help +for most Redis commands, using the `help` command. The command can be used +in two forms: + +* `help @` shows all the commands about a given category. Categories are `@generic`, `@list`, `@set`, `@sorted_set`, `@hash`, `@pubsub`, `@transactions`, `@connection`, `@server`, `@scripting`, `@hyperloglog`. +* `help ` shows specific help for the command given as argument. + +For example in order to show help for the `PFADD` command, use: + + 127.0.0.1:6379> help PFADD + + PFADD key element [element ...] + summary: Adds the specified elements to the specified HyperLogLog. + since: 2.8.9 + +Note that `help` supports TAB completion as well. + +Special modes of operation +=== + +So far we saw two main modes of `redis-cli`. + +* Command line execution of Redis commands. +* Interactive "REPL alike" usage. + +However the CLI performs other auxiliary tasks related to Redis that +are explained in the next sections: + +* Monitoring tool to show continuous stats about a Redis server. +* Scanning a Redis database for very large keys. +* Key space scanner with pattern matching. +* Acting as a [Pub/Sub](/topics/pubsub) client to subscribe to channels. +* Monitoring the commands executed into a Redis instance. +* Checking the [latency](/topics/latency) of a Redis server, in different ways. +* Checking the scheduler latency of the local computer. +* Transferring RDB backups from a remote Redis server to the local computer. +* Acting as a slave to show what a slave would receive. +* Simulating [LRU](/topics/lru-cache) workloads to show stats about keys hits. +* Working a as a client for the Lua debugger. + +## Continuous stats mode + +This is probably one of the less known features of `redis-cli`, and one +very useful in order to minor Redis instances in real time. +To enable this mode, the `--stat` option is used. 
+The output is very clear about the behavior of the CLI in this mode: + + $ redis-cli --stat + ------- data ------ --------------------- load -------------------- - child - + keys mem clients blocked requests connections + 506 1015.00K 1 0 24 (+0) 7 + 506 1015.00K 1 0 25 (+1) 7 + 506 3.40M 51 0 60461 (+60436) 57 + 506 3.40M 51 0 146425 (+85964) 107 + 507 3.40M 51 0 233844 (+87419) 157 + 507 3.40M 51 0 321715 (+87871) 207 + 508 3.40M 51 0 408642 (+86927) 257 + 508 3.40M 51 0 497038 (+88396) 257 + +In this mode a new line is printed every second with useful informations and +the difference between the old data point. You can easily understand what's +happening with memory usage, clients connected, and so forth. + +The `-i ` option in this case works as a modifier in order to +change the frequency at which new lines are emitted. The default is one +second. + +## Scanning for big keys + +In this special mode, `redis-cli` works as a key space analyzer. It scans the +dataset for big keys, but also provides informations about the data types +the data set is composed of. This mode is enabled with the `--bigkeys` option, +and produces a quite verbose output: + + $ redis-cli --bigkeys + + # Scanning the entire keyspace to find biggest keys as well as + # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec + # per 100 SCAN commands (not usually needed). + + [00.00%] Biggest string found so far 'key-419' with 3 bytes + [05.14%] Biggest list found so far 'mylist' with 100004 items + [35.77%] Biggest string found so far 'counter:__rand_int__' with 6 bytes + [73.91%] Biggest hash found so far 'myobject' with 3 fields + + -------- summary ------- + + Sampled 506 keys in the keyspace! 
+ Total key length in bytes is 3452 (avg len 6.82) + + Biggest string found 'counter:__rand_int__' has 6 bytes + Biggest list found 'mylist' has 100004 items + Biggest hash found 'myobject' has 3 fields + + 504 strings with 1403 bytes (99.60% of keys, avg size 2.78) + 1 lists with 100004 items (00.20% of keys, avg size 100004.00) + 0 sets with 0 members (00.00% of keys, avg size 0.00) + 1 hashs with 3 fields (00.20% of keys, avg size 3.00) + 0 zsets with 0 members (00.00% of keys, avg size 0.00) + +In the first part of the output, each new key larger than the previous larger +key (of the same type) encountered is reported. The summary section instead +provides general stats about the data inside the Redis instance. + +The program uses the `SCAN` command, so it can be executed against a busy +server without impacting the operations, however the `-i` option can be +used in order to throttle the scanning process of the specified fraction +of second for each 100 keys requested. For example `-i 0.1` will slow down +the program execution a lot, but will also reduce the load on the server +to a tiny amount. + +Note that the summary also reports in a cleaner form the biggest keys found +for each time. The initial output is just to provide some interesting info +ASAP if running against a very large data set. + +## Getting a list of keys + +It is also possible to scan the key space, again in a way that does not +block the Redis server (which happens if you use, instead, a command +like `KEYS *`), and print all the key names, or filtering by specific +patterns. This mode like the `--bigkeys` option uses the `SCAN` command, +so keys may be reported multiple times if the dataset is changing, but no +key should ever be missing, if the key was present since the start of the +iteration. Because of the command it uses this option is called `--scan`. 
+ + $ redis-cli --scan | head -10 + key-419 + key-71 + key-236 + key-50 + key-38 + key-458 + key-453 + key-499 + key-446 + key-371 + +Note that `head -10` is used in order to print just the first lines of the +output. + +Scanning is able to use the underlying pattern matching capability of +the `SCAN` command in order to provide a `--pattern` option. + + $ redis-cli --scan --pattern '*-11*' + key-114 + key-117 + key-118 + key-113 + key-115 + key-112 + key-119 + key-11 + key-111 + key-110 + key-116 + +Using it in pipe with the `wc` command can be used to count specific +kind of objects, by key name: + + $ redis-cli --scan --pattern 'user:*' | wc -l + 3829433 + +## Pub/sub mode + +The CLI is able to publish messages in Redis Pub/Sub channels just using +the `PUBLISH` command. This is expected since the `PUBLISH` command is very +similar to any other command. Different is the case of subscribing to channels +in order to receive messages, in this case there is to block and wait for +messages, so this is implemented as a special mode into `redis-cli`, however +the mode is not enabled by using a special option, but simply by using the +`SUBSCRIBE` or `PSUBSCRIBE` command, both in interactive or non interactive +mode: + + $ redis-cli psubscribe '*' + Reading messages... (press Ctrl-C to quit) + 1) "psubscribe" + 2) "*" + 3) (integer) 1 + +The *reading messages* message shows that we entered Pub/Sub mode. +Now if another client publishes some message in some channel, like you +can do using `redis-cli PUBLISH mychannel mymessage`, the CLI in Pub/Sub +mode will show something like that: + + 1) "pmessage" + 2) "*" + 3) "mychannel" + 4) "mymessage" + +This is very useful for debugging Pub/Sub issues. +To exit the Pub/Sub mode just process `CTRL-C`. + +## Monitoring commands executed in Redis + +Similarly to the Pub/Sub mode, the monitoring mode is entered automatically +once you use the `MONITOR` mode. 
It will print all the commands received +by a Redis instance: + + $ redis-cli monitor + OK + 1460100081.165665 [0 127.0.0.1:51706] "set" "foo" "bar" + 1460100083.053365 [0 127.0.0.1:51707] "get" "foo" + +## Monitoring the latency of Redis instances + +Redis is often used in contexts where latency is very critical. Latency +involves multiple moving parts within the application, from the client library +to the network stack, to the Redis instance itself. + +The CLI has multiple facilities in order to study the latency of a Redis +instance, to understand what's the maximum and average latency, and the +distribution of latencies in the spectrum. + +The basic latency checking tool is the `--latency` option. Using this +option Redis runs a loop where the `PING` command is sent to the Redis +instance, and the time to get a reply is measured. This happens 100 +times per second, and stats are updated in a real time in the console: + + $ redis-cli --latency + min: 0, max: 1, avg: 0.19 (427 samples) + +The stats are provided in milliseconds. Usually the average latency of +a very fast instance tends to be overestimated a bit because of the +latency due to the kernel scheduler of the system running `redis-cli` +itself, so the average latency of 0.19 above may easily be 0.01 or less. +However this is usually not a big problem, since we are interested to see +events of a few millisecond or more. + +Sometimes it is useful to study how the maximum and average latencies +evolve during time. For this goal the `--latency-history` option is used: +it works exactly like `--latency`, but each 15 second (by default) a new +sampling session is started, starting from zero: + + $ redis-cli --latency-history + min: 0, max: 1, avg: 0.14 (1314 samples) -- 15.01 seconds range + min: 0, max: 1, avg: 0.18 (1299 samples) -- 15.00 seconds range + min: 0, max: 1, avg: 0.20 (113 samples)^C + +You can change the sampling sessions length with the `-i ` option. 
+ +Finally the most advanced latency study tool, but also a bit harder to +interpret for non experienced users, is the ability to use color terminals +to show a spectrum of latencies. You'll see a colored output to indicate the +different percentages of samples, and different ASCII characters to indicate +different latency figures. This mode is enabled using the `--latency-dist` +option: + + $ redis-cli --latency-dist + (output not displayed, requires a color terminal, try it!) + +There is another pretty unusual latency tool implemented inside `redis-cli`. +It does not check the latency of a Redis instance, but the latency of the +computer you are running `redis-cli` into. What latency you may ask? +The latency that's intrinsic to the kernel scheduler, the hypervisor in case +of virtualized instances, and so forth. + +We call it *intrinsic latency* because it's opaque to the programmer, mostly. +If your Redis instance has a bad latency regardless of all the obvious things +that may be the source cause, it's worth to check what's the best your system +can do, by running `redis-cli` in this special mode directly in the system you +are running Redis servers into. + +By measuring the intrinsic latency, you know that this is the base line, +and Redis cannot do better than your system. In order to run the CLI +in this mode, use the `--intrinsic-latency `. The test time +is in seconds, and specifies how many seconds `redis-cli` should check the +latency of the system it's currently running into. + + $ ./redis-cli --intrinsic-latency 5 + Max latency so far: 1 microseconds. + Max latency so far: 7 microseconds. + Max latency so far: 9 microseconds. + Max latency so far: 11 microseconds. + Max latency so far: 13 microseconds. + Max latency so far: 15 microseconds. + Max latency so far: 34 microseconds. + Max latency so far: 82 microseconds. + Max latency so far: 586 microseconds. + Max latency so far: 739 microseconds. 
+ + 65433042 total runs (avg latency: 0.0764 microseconds / 764.14 nanoseconds per run). + Worst run took 9671x longer than the average latency. + +IMPORTANT: this command must be executed in the computer you want to run Redis +server into, not in another host. It does not even connect to a Redis instance, +it performs a test in the local computer. + +In the above case, my system cannot do better than 739 microseconds of worst +case latency, so I can expect certain queries to run in a bit less than 1 +millisecond from time to time. + +## Remote backups of RDB files + +During Redis replication first synchronization, the master and the slave +exchange the whole data set in form of an RDB file. This feature is exploited +by `redis-cli` in order to provide a remote backup facility, that allows to +transfer an RDB file from any Redis instance to the local computer running +`redis-cli`. To use this mode, call the CLI with the `--rdb ` +option: + + $ redis-cli --rdb /tmp/dump.rdb + SYNC sent to master, writing 13256 bytes to '/tmp/dump.rdb' + Transfer finished with success. + +This is a simple but effective way to make sure you have disaster recovery +RDB backups of your Redis instance. However when using this options in +scripts or cron jobs, make sure to check the return value of the command. +If it is non zero, an error occurred like in the following example: + + $ redis-cli --rdb /tmp/dump.rdb + SYNC with master failed: -ERR Can't SYNC while not connected with my master + $ echo $? + 1 + +## Slave mode + +The slave mode of the CLI is an advanced feature useful for +Redis developers and for debugging operations. +It allows to inspect what a master sends to its slaves in the replication +stream in order to propagate the writes to its replicas. The option +name is simply `--slave`. This is how it works: + + $ redis-cli --slave + SYNC with master, discarding 13256 bytes of bulk transfer... + SYNC done. Logging commands from master. 
+ "PING" + "SELECT","0" + "set","foo","bar" + "PING" + "incr","myconuter" + +The command starts discarding the RDB file of the first synchronization, later +it logs each command received as in CSV format. + +If you think some of the commands are not replicated correctly in your slaves +this is a good way to check what's happening, and also useful information +in order to improve the bug report. + +## Performing an LRU simulation + +Redis is often used as a cache with [LRU eviction](/topics/lru-cache). +Depending on the number of keys and the amount of memory allocated for the +cache (specified via the `maxmemory` directive), the amount of cache hits +and misses will change. Sometimes to simulate the rate of hits is very +useful in order to correctly provision your cache. + +The CLI has a special mode where it performs a simulation of GET and SET +operations, using an 80-20% power law distribution in the requests pattern, +this means that 20% of keys will be requested 80% of times, which is a +common distribution in caching scenarios. + +Technically given the distribution of the requests and the Redis memory +overhead, it could be possible to compute the hit rate analytically, just +with a mathematical formula. However Redis can be configured with different +LRU settings (number of samples), and implementations of LRU, which is +approximated in Redis, changes a lot between different versions. Similarly +the amount of memory per key may change between versions. This is why this +tool was built: the main motivation was to test the Redis LRU implementation +quality, but now is also useful in order to test how a given version behaves +with the settings you had in mind for your deployment. + +In order to test this mode, you need to specify the amount of keys to use +in the test. You also need to configure a `maxmemory` setting that as a first +guess you think makes sense as a first try. 
+ +IMPORTANT NOTE: Configuring the `maxmemory` setting in the Redis configuration +is crucial: if there is no cap to the maximum memory usage, the hit will +eventually be 100% since all the keys can be stored in memory. Or if you +specify too many keys and no maximum memory, eventually all the computer +RAM will be used. It is also needed to configure an appropriate +*maxmemory policy*, most of the times what you want is `allkeys-lru`. + +In the following example I configured a memory limit of 100MB, and an LRU +simulation using 10 million keys. + +WARNING: the test uses pipelining and will stress the server, don't use it +with production instances. + + $ ./redis-cli --lru-test 10000000 + 156000 Gets/sec | Hits: 4552 (2.92%) | Misses: 151448 (97.08%) + 153750 Gets/sec | Hits: 12906 (8.39%) | Misses: 140844 (91.61%) + 159250 Gets/sec | Hits: 21811 (13.70%) | Misses: 137439 (86.30%) + 151000 Gets/sec | Hits: 27615 (18.29%) | Misses: 123385 (81.71%) + 145000 Gets/sec | Hits: 32791 (22.61%) | Misses: 112209 (77.39%) + 157750 Gets/sec | Hits: 42178 (26.74%) | Misses: 115572 (73.26%) + 154500 Gets/sec | Hits: 47418 (30.69%) | Misses: 107082 (69.31%) + 151250 Gets/sec | Hits: 51636 (34.14%) | Misses: 99614 (65.86%) + +The program shows stats every second. As you see, in the first seconds +the cache starts to be populated. The misses rate later stabilizes into +the actual figure we can expect in the long time: + + 120750 Gets/sec | Hits: 48774 (40.39%) | Misses: 71976 (59.61%) + 122500 Gets/sec | Hits: 49052 (40.04%) | Misses: 73448 (59.96%) + 127000 Gets/sec | Hits: 50870 (40.06%) | Misses: 76130 (59.94%) + 124250 Gets/sec | Hits: 50147 (40.36%) | Misses: 74103 (59.64%) + +A miss rate of 59% may not be acceptable for our use case. So we know that +100MB of memory are not enough. Let's try with half gigabyte. 
After a few +minutes we'll see the output to stabilize to the following figures: + + 140000 Gets/sec | Hits: 135376 (96.70%) | Misses: 4624 (3.30%) + 141250 Gets/sec | Hits: 136523 (96.65%) | Misses: 4727 (3.35%) + 140250 Gets/sec | Hits: 135457 (96.58%) | Misses: 4793 (3.42%) + 140500 Gets/sec | Hits: 135947 (96.76%) | Misses: 4553 (3.24%) + +So we know that with 500MB we are going well enough for our number of +keys (10 millions) and distribution (80-20 style). + + From 86164b0e92018967034da7f7da2c7b8fca229b44 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 8 Apr 2016 19:44:59 +0300 Subject: [PATCH 0603/2314] Edits and such Sorry for all the formatting changes. --- topics/rediscli.md | 311 +++++++++++++++++++++++---------------------- 1 file changed, 157 insertions(+), 154 deletions(-) diff --git a/topics/rediscli.md b/topics/rediscli.md index 4794494b03..11600682b0 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -1,20 +1,21 @@ -redis-cli, the Redis command line interface +redis-cli, the Redis command line interface === `redis-cli` is the Redis command line interface, a simple program that allows -to send commands to Redis, and read the replies sent by the server, directly from the terminal. +to send commands to Redis, and read the replies sent by the server, directly +from the terminal. It has two main modes: an interactive mode where there is a REPL (Read -Eval Print Loop) where the user types commands and get replies. And another +Eval Print Loop) where the user types commands and get replies; and another mode where the command is sent as arguments of `redis-cli`, executed, and printed on the standard output. -In interactive mode `redis-cli` has basic line editing capabilities to provide +In interactive mode, `redis-cli` has basic line editing capabilities to provide a good typing experience. However `redis-cli` is not just that. 
There are options you can use to launch the program in order to put it into special modes, so that `redis-cli` can -do definitely more complex tasks, like simulate a slave and print the +definitely do more complex tasks, like simulate a slave and print the replication stream it receives from the master, check the latency of a Redis server and show statistics or even an ASCII-art spectrogram of latency samples and frequencies, and many other things. @@ -30,58 +31,55 @@ with Redis once you know all the tricks of its command line interface. Command line usage === -To just run a command and have its reply printed on the standard output, -it is as simple as typing the command to execute as separated arguments -of `redis-cli`: +To just run a command and have its reply printed on the standard output is as +simple as typing the command to execute as separated arguments of `redis-cli`: $ redis-cli incr mycounter (integer) 7 The reply of the command is "7". Since Redis replies are typed (they can be strings, arrays, integers, NULL, errors and so forth), you see the type -of the reply between brackets. However that would be not exactly idea when -the output of `redis-cli` must be used as input of another command, or when +of the reply between brackets. However that would be not exactly a great idea +when the output of `redis-cli` must be used as input of another command, or when we want to redirect it into a file. -Actually `redis-cli` only shows additional informations which improve human -readability when it detects the standard output is a tty (a terminal basically). -Otherwise it will auto-enable the *raw output mode*, like in the following -example: +Actually `redis-cli` only shows additional information which improves +readability for humans when it detects the standard output is a tty (a terminal +basically). 
Otherwise it will auto-enable the *raw output mode*, like in the +following example: $ redis-cli incr mycounter > /tmp/output.txt $ cat /tmp/output.txt 8 -This time `(integer)` was omitted from the output since the cli detected +This time `(integer)` was omitted from the output since the CLI detected the output was no longer written to the terminal. You can force raw output even on the terminal with the `--raw` option: $ redis-cli --raw incr mycounter 9 -Similarly you can force human readable output when writing to a file or in +Similarly, you can force human readable output when writing to a file or in pipe to other commands by using `--no-raw`. ## Host, port, password and database By default `redis-cli` connects to the server at 127.0.0.1 port 6379. As you can guess, you can easily change this using command line options. -To specify a different host name, or IP address, use `-h`. In order to -set a different port, use `-p`. +To specify a different host name or an IP address, use `-h`. In order +to set a different port, use `-p`. $ redis-cli -h redis15.localnet.org -p 6390 ping PONG -If your instance is password protected, the `-a ` option -will authenticate for you without the need of typing an explicit -`AUTH` command: +If your instance is password protected, the `-a ` option will +perform authentication saving the need of explicitly using the `AUTH` command: $ redis-cli -a myUnguessablePazzzzzword123 ping PONG -Finally it's possible to send a command that operates on a different -database number (the default is to use zero), using the `-n ` -option: +Finally, it's possible to send a command that operates on a database number +other than the default number zero by using the `-n ` option: $ redis-cli flushall OK @@ -94,27 +92,27 @@ option: ## Getting input from other programs -There are two ways you can use `redis-cli` in order to get the input -from other commands (from the standard input, basically). 
-One is to use as last argument the payload we read from *stdin*. -For example in order to set a Redis key to the content of the -file `/etc/services` if my computer, I can use the `-x` option: +There are two ways you can use `redis-cli` in order to get the input from other +commands (from the standard input, basically). One is to use as last argument +as the payload we read from *stdin*. For example, in order to set a Redis key +to the content of the file `/etc/services` if my computer, I can use the `-x` +option: $ redis-cli -x set foo < /etc/services OK $ redis-cli getrange foo 0 50 "#\n# Network services, Internet style\n#\n# Note that " -As you can see in the first line of the above session, the last argument -of the `SET` command was not specified. The arguments are just `SET foo` -without the actual value I want my key to be set. +As you can see in the first line of the above session, the last argument of the +`SET` command was not specified. The arguments are just `SET foo` without the +actual value I want my key to be set to. -Instead the `-x` option was specified, and a file was redirected to the -cli standard input. So the input was read, and was used as the final -argument for the command. This is useful for scripting. +Instead, the `-x` option was specified and a file was redirected to the CLI's +standard input. So the input was read, and was used as the final argument for +the command. This is useful for scripting. -A different approach is to feed `redis-cli` a sequence of commands -written in a text file: +A different approach is to feed `redis-cli` a sequence of commands written in a +text file: $ cat /tmp/commands.txt set foo 100 @@ -127,9 +125,9 @@ written in a text file: (integer) 6 "101xxx" -All the commands into `commands.txt` are executed one after the other -by redis-cli as if they were typed by the user interactive. 
Strings can -be quoted inside the file needed, so that it's possible to have single +All the commands in `commands.txt` are executed one after the other by +`redis-cli` as if they were typed by the user interactive. Strings can be +quoted inside the file if needed, so that it's possible to have single arguments with spaces or newlines or other special chars inside: $ cat /tmp/commands.txt @@ -163,7 +161,7 @@ ASAP: (integer) 5 To run the same command forever, use `-1` as count. -So for example in order to monitor over time the RSS memory size it's possible +So, in order to monitor over time the RSS memory size it's possible to use a command like the following: $ redis-cli -r -1 -i 1 INFO | grep rss_human @@ -175,32 +173,32 @@ to use a command like the following: ## Mass insertion of data using `redis-cli` Mass insert using `redis-cli` is covered in a separated page since it's a -worthwhile topic itself. Please refer to our [mass insertion guide](/topics/mass-insert). +worthwhile topic itself. Please refer to our +[mass insertion guide](/topics/mass-insert). ## CSV output -Sometimes you may want to use `redis-cli` in order to quickly export data -from Redis to an external program. This can be accomplished using the CSV -output feature: +Sometimes you may want to use `redis-cli` in order to quickly export data from +Redis to an external program. This can be accomplished using the CSV (Comma +Separated Values) output feature: $ redis-cli lpush mylist a b c d (integer) 4 $ redis-cli --csv lrange mylist 0 -1 "d","c","b","a" -Currently it's not possible to export the whole DB like that, but only -to run single commands with CSV output. +Currently it's not possible to export the whole DB like that, but only to run +single commands with CSV output. ## Running Lua scripts -The `redis-cli` has extensive support for using the new Lua debugging -facility of Lua scripting, available starting with Redis 3.2. 
However -for this feature, please refer to the -[Redis Lua debugger documentation](/topics/ldb). +The `redis-cli` has extensive support for using the new Lua debugging facility +of Lua scripting, available starting with Redis 3.2. For this feature, please +refer to the [Redis Lua debugger documentation](/topics/ldb). However, even without using the debugger, you can use `redis-cli` to run scripts from a file in a way more comfortable compared to typing -the script interactively into the shell or as an argument. +the script interactively into the shell or as an argument: $ cat /tmp/script.lua return redis.call('set',KEYS[1],ARGV[1]) @@ -209,14 +207,14 @@ the script interactively into the shell or as an argument. The Redis `EVAL` command takes the list of keys the script uses, and the other non key arguments, as different arrays. When calling `EVAL` you -provide the number of keys as a number. However with `redis-cli` by using +provide the number of keys as a number. However with `redis-cli` and using the `--eval` option above, there is no need to specify the number of keys explicitly. Instead it uses the convention of separating keys and arguments with a comma. This is why in the above call you see `foo , bar` as arguments. So `foo` will populate the `KEYS` array, and `bar` the `ARGV` array. -The `--eval` option ca be useful in order to write simple scripts. For more +The `--eval` option is useful when writing simple scripts. For more complex work, using the Lua debugger is definitely more comfortable. It's possible to mix the two approaches, since the debugger also uses executing scripts from an external file. @@ -229,12 +227,12 @@ This is very useful for scripts and certain types of testing, however most people will spend the majority of time in `redis-cli` using its interactive mode. -In interactive mode the user types Redis commands into a prompt. The command +In interactive mode the user types Redis commands at the prompt. 
The command is sent to the server, processed, and the reply is parsed back and rendered -in a simpler to read form. +into a simpler form to read. -In order to run the CLI in interactive mode, there is nothing special to do: -just lunch it without arguments and you are in: +Nothing special is needed for running the CLI in interactive mode - +just launch it without any arguments and you are in: $ redis-cli 127.0.0.1:6379> ping @@ -243,7 +241,7 @@ just lunch it without arguments and you are in: The string `127.0.0.1:6379>` is the prompt. It reminds you that you are connected to a given Redis instance. -The prompt changes as the server you are connected to, changes, or when you +The prompt changes as the server you are connected to changes, or when you are operating on a database different than the database number zero: 127.0.0.1:6379> select 2 @@ -257,7 +255,7 @@ are operating on a database different than the database number zero: ## Handling connections and reconnections -Using the `connect` command in interactive mode it's possible to connect +Using the `connect` command in interactive mode makes it possible to connect to a different instance, by specifying the *hostname* and *port* we want to connect to: @@ -267,7 +265,7 @@ to connect to: As you can see the prompt changes accordingly. 
If the user attempts to connect to an instance that is unreachable, the `redis-cli` goes into disconnected -more, and attempts to reconnect at each new command: +mode and attempts to reconnect with each new command: 127.0.0.1:6379> connect 127.0.0.1 9999 Could not connect to Redis at 127.0.0.1:9999: Connection refused @@ -276,7 +274,7 @@ more, and attempts to reconnect at each new command: not connected> ping Could not connect to Redis at 127.0.0.1:9999: Connection refused -In general after a disconnection is detected, the CLI always attempts to +Generally after a disconnection is detected, the CLI always attempts to reconnect transparently: if the attempt fails, it shows the error and enters the disconnected state. The following is an example of disconnection and reconnection: @@ -288,8 +286,8 @@ and reconnection: 127.0.0.1:6379> (now we are connected again) When a reconnection is performed, `redis-cli` automatically re-select the -latest database number selected. However all the other state about the -connection is lost, like for example, the state of a transaction if we +last database number selected. However, all the other state about the +connection is lost, such as the state of a transaction if we were in the middle of it: $ redis-cli @@ -297,30 +295,32 @@ were in the middle of it: OK 127.0.0.1:6379> ping QUEUED - + ( here the server is manually restarted ) 127.0.0.1:6379> exec (error) ERR EXEC without MULTI This is usually not an issue when using the CLI in interactive mode for -testing, but you want to make sure to know about this limitation. +testing, but you should be aware of this limitation. ## Editing, history and completion Because `redis-cli` uses the [linenoise line editing library](http://github.com/antirez/linenoise), it -always has line editing capabilities, without depending on `libreadline` or other -optional libraries. +always has line editing capabilities, without depending on `libreadline` or +other optional libraries. 
You can access an history of commands executed, in order to avoid retyping them again and again, by pressing the arrow keys (up and down). The history is preserved between restarts of the CLI, in a file called `.rediscli_history` inside the user home directory, as specified -by the `HOME` environment variable. +by the `HOME` environment variable. It is possible to use a different +history filename by setting the `REDISCLI_HISTFILE` environment variable, +and disable it by setting it to `/dev/null`. -The CLI is also able to perform command names completion by pressing the TAB key, -like in the following example: +The CLI is also able to perform command names completion by pressing the TAB +key, like in the following example: 127.0.0.1:6379> Z 127.0.0.1:6379> ZADD @@ -340,12 +340,15 @@ name by a number: ## Showing help about Redis commands -Redis has a number of commands and sometimes, as you test things, you may -not remember the exact order of arguments. `redis-cli` provides online help -for most Redis commands, using the `help` command. The command can be used +Redis has a number of [commands](/commands) and sometimes, as you test things, +you may not remember the exact order of arguments. `redis-cli` provides online +help for most Redis commands, using the `help` command. The command can be used in two forms: -* `help @` shows all the commands about a given category. Categories are `@generic`, `@list`, `@set`, `@sorted_set`, `@hash`, `@pubsub`, `@transactions`, `@connection`, `@server`, `@scripting`, `@hyperloglog`. +* `help @` shows all the commands about a given category. The +categories are: `@generic`, `@list`, `@set`, `@sorted_set`, `@hash`, +`@pubsub`, `@transactions`, `@connection`, `@server`, `@scripting`, +`@hyperloglog`. * `help ` shows specific help for the command given as argument. For example in order to show help for the `PFADD` command, use: @@ -364,7 +367,7 @@ Special modes of operation So far we saw two main modes of `redis-cli`. 
* Command line execution of Redis commands. -* Interactive "REPL alike" usage. +* Interactive "REPL-like" usage. However the CLI performs other auxiliary tasks related to Redis that are explained in the next sections: @@ -374,16 +377,16 @@ are explained in the next sections: * Key space scanner with pattern matching. * Acting as a [Pub/Sub](/topics/pubsub) client to subscribe to channels. * Monitoring the commands executed into a Redis instance. -* Checking the [latency](/topics/latency) of a Redis server, in different ways. +* Checking the [latency](/topics/latency) of a Redis server in different ways. * Checking the scheduler latency of the local computer. -* Transferring RDB backups from a remote Redis server to the local computer. -* Acting as a slave to show what a slave would receive. -* Simulating [LRU](/topics/lru-cache) workloads to show stats about keys hits. -* Working a as a client for the Lua debugger. +* Transferring RDB backups from a remote Redis server locally. +* Acting as a Redis slave for showing what a slave receives. +* Simulating [LRU](/topics/lru-cache) workloads for showing stats about keys hits. +* A client for the Lua debugger. ## Continuous stats mode -This is probably one of the less known features of `redis-cli`, and one +This is probably one of the lesser known features of `redis-cli`, and one very useful in order to monitor Redis instances in real time. To enable this mode, the `--stat` option is used. The output is very clear about the behavior of the CLI in this mode: @@ -400,7 +403,7 @@ The output is very clear about the behavior of the CLI in this mode: 506 1015.00K 1 0 24 (+0) 7 506 1015.00K 1 0 25 (+1) 7 506 3.40M 51 0 60461 (+60436) 57 506 3.40M 51 0 146425 (+85964) 107 507 3.40M 51 0 233844 (+87419) 157 507 3.40M 51 0 321715 (+87871) 207 508 3.40M 51 0 408642 (+86927) 257 508 3.40M 51 0 497038 (+88396) 257 -In this mode a new line is printed every second with useful informations and +In this mode a new line is printed every second with useful information and the difference between the old data point. You can easily understand what's happening with memory usage, clients connected, and so forth. 
@@ -411,9 +414,9 @@ second. ## Scanning for big keys In this special mode, `redis-cli` works as a key space analyzer. It scans the -dataset for big keys, but also provides informations about the data types -the data set is composed of. This mode is enabled with the `--bigkeys` option, -and produces a quite verbose output: +dataset for big keys, but also provides information about the data types +that the data set consists of. This mode is enabled with the `--bigkeys` option, +and produces quite a verbose output: $ redis-cli --bigkeys @@ -442,13 +445,13 @@ and produces a quite verbose output: 0 zsets with 0 members (00.00% of keys, avg size 0.00) In the first part of the output, each new key larger than the previous larger -key (of the same type) encountered is reported. The summary section instead +key (of the same type) encountered is reported. The summary section provides general stats about the data inside the Redis instance. The program uses the `SCAN` command, so it can be executed against a busy -server without impacting the operations, however the `-i` option can be +server without impacting the operations, but the `-i` option can be used in order to throttle the scanning process of the specified fraction -of second for each 100 keys requested. For example `-i 0.1` will slow down +of second for each 100 keys requested. For example, `-i 0.1` will slow down the program execution a lot, but will also reduce the load on the server to a tiny amount. @@ -459,12 +462,12 @@ ASAP if running against a very large data set. ## Getting a list of keys It is also possible to scan the key space, again in a way that does not -block the Redis server (which happens if you use, instead, a command -like `KEYS *`), and print all the key names, or filtering by specific -patterns. 
This mode like the `--bigkeys` option uses the `SCAN` command, +block the Redis server (which does happen when you use a command +like `KEYS *`), and print all the key names, or filter them for specific +patterns. This mode, like the `--bigkeys` option, uses the `SCAN` command, so keys may be reported multiple times if the dataset is changing, but no -key should ever be missing, if the key was present since the start of the -iteration. Because of the command it uses this option is called `--scan`. +key would ever be missing, if that key was present since the start of the +iteration. Because of the command that it uses this option is called `--scan`. $ redis-cli --scan | head -10 key-419 @@ -478,11 +481,11 @@ iteration. Because of the command it uses this option is called `--scan`. key-446 key-371 -Note that `head -10` is used in order to print just the first lines of the +Note that `head -10` is used in order to print only the first lines of the output. Scanning is able to use the underlying pattern matching capability of -the `SCAN` command in order to provide a `--pattern` option. +the `SCAN` command with the `--pattern` option. $ redis-cli --scan --pattern '*-11*' key-114 @@ -497,7 +500,7 @@ the `SCAN` command in order to provide a `--pattern` option. key-110 key-116 -Using it in pipe with the `wc` command can be used to count specific +Piping the output through the `wc` command can be used to count specific kind of objects, by key name: $ redis-cli --scan --pattern 'user:*' | wc -l @@ -507,12 +510,12 @@ kind of objects, by key name: The CLI is able to publish messages in Redis Pub/Sub channels just using the `PUBLISH` command. This is expected since the `PUBLISH` command is very -similar to any other command. 
Different is the case of subscribing to channels -in order to receive messages, in this case there is to block and wait for -messages, so this is implemented as a special mode into `redis-cli`, however -the mode is not enabled by using a special option, but simply by using the -`SUBSCRIBE` or `PSUBSCRIBE` command, both in interactive or non interactive -mode: +similar to any other command. Subscribing to channels in order to receive +is different messages - in this case there is to block and wait for +messages, so this is implemented as a special mode in `redis-cli`. Unlike +other special modes this mode is not enabled by using a special option, +but simply by using the `SUBSCRIBE` or `PSUBSCRIBE` command, both in +interactive or non interactive mode: $ redis-cli psubscribe '*' Reading messages... (press Ctrl-C to quit) @@ -521,9 +524,9 @@ mode: 3) (integer) 1 The *reading messages* message shows that we entered Pub/Sub mode. -Now if another client publishes some message in some channel, like you +When another client publishes some message in some channel, like you can do using `redis-cli PUBLISH mychannel mymessage`, the CLI in Pub/Sub -mode will show something like that: +mode will show something such as: 1) "pmessage" 2) "*" @@ -544,47 +547,49 @@ by a Redis instance: 1460100081.165665 [0 127.0.0.1:51706] "set" "foo" "bar" 1460100083.053365 [0 127.0.0.1:51707] "get" "foo" +Note that it is possible to use to pipe the output, so you can monitor +for specific patterns using tools such as `grep`. + ## Monitoring the latency of Redis instances Redis is often used in contexts where latency is very critical. Latency involves multiple moving parts within the application, from the client library to the network stack, to the Redis instance itself. -The CLI has multiple facilities in order to study the latency of a Redis -instance, to understand what's the maximum and average latency, and the -distribution of latencies in the spectrum. 
+The CLI has multiple facilities for studying the latency of a Redis +instance and understanding the latency's maximum, average and distribution. The basic latency checking tool is the `--latency` option. Using this -option Redis runs a loop where the `PING` command is sent to the Redis +option the CLI runs a loop where the `PING` command is sent to the Redis instance, and the time to get a reply is measured. This happens 100 times per second, and stats are updated in a real time in the console: $ redis-cli --latency min: 0, max: 1, avg: 0.19 (427 samples) -The stats are provided in milliseconds. Usually the average latency of +The stats are provided in milliseconds. Usually, the average latency of a very fast instance tends to be overestimated a bit because of the latency due to the kernel scheduler of the system running `redis-cli` itself, so the average latency of 0.19 above may easily be 0.01 or less. -However this is usually not a big problem, since we are interested to see +However this is usually not a big problem, since we are interested in events of a few millisecond or more. Sometimes it is useful to study how the maximum and average latencies -evolve during time. For this goal the `--latency-history` option is used: -it works exactly like `--latency`, but each 15 second (by default) a new -sampling session is started, starting from zero: +evolve during time. The `--latency-history` option is used for that +purpose: it works exactly like `--latency`, but every 15 seconds (by +default) a new sampling session is started beginning with zero: $ redis-cli --latency-history min: 0, max: 1, avg: 0.14 (1314 samples) -- 15.01 seconds range min: 0, max: 1, avg: 0.18 (1299 samples) -- 15.00 seconds range min: 0, max: 1, avg: 0.20 (113 samples)^C -You can change the sampling sessions length with the `-i ` option. +You can change the sampling sessions' length with the `-i ` option. 
-Finally the most advanced latency study tool, but also a bit harder to +The most advanced latency study tool, but also a bit harder to interpret for non experienced users, is the ability to use color terminals -to show a spectrum of latencies. You'll see a colored output to indicate the -different percentages of samples, and different ASCII characters to indicate +to show a spectrum of latencies. You'll see a colored output that indicate the +different percentages of samples, and different ASCII characters that indicate different latency figures. This mode is enabled using the `--latency-dist` option: @@ -593,21 +598,21 @@ option: There is another pretty unusual latency tool implemented inside `redis-cli`. It does not check the latency of a Redis instance, but the latency of the -computer you are running `redis-cli` into. What latency you may ask? +computer you are running `redis-cli` on. What latency you may ask? The latency that's intrinsic to the kernel scheduler, the hypervisor in case of virtualized instances, and so forth. We call it *intrinsic latency* because it's opaque to the programmer, mostly. -If your Redis instance has a bad latency regardless of all the obvious things +If your Redis instance has bad latency regardless of all the obvious things that may be the source cause, it's worth to check what's the best your system -can do, by running `redis-cli` in this special mode directly in the system you -are running Redis servers into. +can do by running `redis-cli` in this special mode directly in the system you +are running Redis servers on. -By measuring the intrinsic latency, you know that this is the base line, -and Redis cannot do better than your system. In order to run the CLI -int this mode, use the `--intrinsic-latency `. The test time -is in seconds, and specify how many seconds `redis-cli` should check the -latency of the system it's currently running into. 
+By measuring the intrinsic latency, you know that this is the baseline, +and Redis cannot outdo your system. In order to run the CLI +in this mode, use the `--intrinsic-latency `. The test's time +is in seconds, and specifies how many seconds `redis-cli` should check the +latency of the system it's currently running on. $ ./redis-cli --intrinsic-latency 5 Max latency so far: 1 microseconds. @@ -624,9 +629,9 @@ latency of the system it's currently running into. 65433042 total runs (avg latency: 0.0764 microseconds / 764.14 nanoseconds per run). Worst run took 9671x longer than the average latency. -IMPORTANT: this command must be executed in the computer you want to run Redis -server into, not in another host. It does not even connect to a Redis instance, -it performs a test in the local computer. +IMPORTANT: this command must be executed on the computer you want to run Redis +server on, not on a different host. It does not even connect to a Redis instance +and performs the test only locally. In the above case, my system cannot do better than 739 microseconds of worst case latency, so I can expect certain queries to run in a bit less than 1 @@ -634,7 +639,7 @@ millisecond from time to time. ## Remote backups of RDB files -During Redis replication first synchronization, the master and the slave +During Redis replication's first synchronization, the master and the slave exchange the whole data set in form of an RDB file. This feature is exploited by `redis-cli` in order to provide a remote backup facility, that allows to transfer an RDB file from any Redis instance to the local computer running @@ -647,7 +652,7 @@ option: This is a simple but effective way to make sure you have disaster recovery RDB backups of your Redis instance. However when using this options in -scripts or cron jobs, make sure to check the return value of the command. +scripts or `cron` jobs, make sure to check the return value of the command. 
If it is non zero, an error occurred like in the following example: $ redis-cli --rdb /tmp/dump.rdb @@ -657,7 +662,7 @@ If it is non zero, an error occurred like in the following example: ## Slave mode -The slave mode of the CLI is an advanced feature useful for +The slave mode of the CLI is an advanced feature useful for Redis developers and for debugging operations. It allows to inspect what a master sends to its slaves in the replication stream in order to propagate the writes to its replicas. The option @@ -670,10 +675,10 @@ name is simply `--slave`. This is how it works: "SELECT","0" "set","foo","bar" "PING" - "incr","myconuter" + "incr","myconuter" -The command starts discarding the RDB file of the first synchronization, later -it logs each command received as in CSV format. +The command begins by discarding the RDB file of the first synchronization +and then logs each command received as in CSV format. If you think some of the commands are not replicated correctly in your slaves this is a good way to check what's happening, and also useful information @@ -684,27 +689,27 @@ in order to improve the bug report. Redis is often used as a cache with [LRU eviction](/topics/lru-cache). Depending on the number of keys and the amount of memory allocated for the cache (specified via the `maxmemory` directive), the amount of cache hits -and misses will change. Sometimes to simulate the rate of hits is very -useful in order to correctly provision your cache. +and misses will change. Sometimes, simulating the rate of hits is very +useful to correctly provision your cache. The CLI has a special mode where it performs a simulation of GET and SET -operations, using an 80-20% power law distribution in the requests pattern, -this means that 20% of keys will be requested 80% of times, which is a +operations, using an 80-20% power law distribution in the requests pattern. 
+This means that 20% of keys will be requested 80% of times, which is a common distribution in caching scenarios. -Technically given the distribution of the requests and the Redis memory -overhead, it could be possible to compute the hit rate analytically, just -with a mathematical formula. However Redis can be configured with different -LRU settings (number of samples), and implementations of LRU, which is -approximated in Redis, changes a lot between different versions. Similarly -the amount of memory per key may change between versions. This is why this -tool was built: the main motivation was to test the Redis LRU implementation -quality, but now is also useful in order to test how a given version behaves -with the settings you had in mind for your deployment. +Theoretically, given the distribution of the requests and the Redis memory +overhead, it should be possible to compute the hit rate analytically with +with a mathematical formula. However, Redis can be configured with +different LRU settings (number of samples) and LRU's implementation, which +is approximated in Redis, changes a lot between different versions. Similarly +the amount of memory per key may change between versions. That is why this +tool was built: its main was motivation for testing the quality of Redis' LRU +implementation, but now is also useful in for testing how a given version +behaves with the settings you had in mind for your deployment. -In order to test this mode, you need to specify the amount of keys to use -in the test. You also need to configure a `maxmemory` setting that as a first -guess you think makes sense as a first try. +In order to use this mode, you need to specify the amount of keys +in the test. You also need to configure a `maxmemory` setting that +makes sense as a first try. 
IMPORTANT NOTE: Configuring the `maxmemory` setting in the Redis configuration is crucial: if there is no cap to the maximum memory usage, the hit will @@ -749,5 +754,3 @@ minutes we'll see the output to stabilize to the following figures: So we know that with 500MB we are going well enough for our number of keys (10 millions) and distribution (80-20 style). - - From fd99c08cf95fccff82d0371ad8f688bb36132144 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 9 Apr 2016 00:08:29 +0300 Subject: [PATCH 0604/2314] Applies proofing from @antirez --- topics/rediscli.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/rediscli.md b/topics/rediscli.md index 11600682b0..0708b64122 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -94,7 +94,7 @@ other than the default number zero by using the `-n ` option: There are two ways you can use `redis-cli` in order to get the input from other commands (from the standard input, basically). One is to use as last argument -as the payload we read from *stdin*. For example, in order to set a Redis key +the payload we read from *stdin*. For example, in order to set a Redis key to the content of the file `/etc/services` if my computer, I can use the `-x` option: @@ -231,7 +231,7 @@ In interactive mode the user types Redis commands at the prompt. The command is sent to the server, processed, and the reply is parsed back and rendered into a simpler form to read. -Nothing special is needed for runung the CLI in interactive mode - +Nothing special is needed for running the CLI in interactive mode - just lunch it without any arguments and you are in: $ redis-cli @@ -449,7 +449,7 @@ key (of the same type) encountered is reported. The summary section provides general stats about the data inside the Redis instance. 
The program uses the `SCAN` command, so it can be executed against a busy -server without impacting the operations, but the `-i` option can be +server without impacting the operations, however the `-i` option can be used in order to throttle the scanning process of the specified fraction of second for each 100 keys requested. For example, `-i 0.1` will slow down the program execution a lot, but will also reduce the load on the server @@ -511,7 +511,7 @@ kind of objects, by key name: The CLI is able to publish messages in Redis Pub/Sub channels just using the `PUBLISH` command. This is expected since the `PUBLISH` command is very similar to any other command. Subscribing to channels in order to receive -is different messages - in this case there is to block and wait for +messages is different - in this case we need to block and wait for messages, so this is implemented as a special mode in `redis-cli`. Unlike other special modes this mode is not enabled by using a special option, but simply by using the `SUBSCRIBE` or `PSUBSCRIBE` command, both in @@ -703,7 +703,7 @@ with a mathematical formula. However, Redis can be configured with different LRU settings (number of samples) and LRU's implementation, which is approximated in Redis, changes a lot between different versions. Similarly the amount of memory per key may change between versions. That is why this -tool was built: its main was motivation for testing the quality of Redis' LRU +tool was built: its main motivation was for testing the quality of Redis' LRU implementation, but now is also useful in for testing how a given version behaves with the settings you had in mind for your deployment. 
From 5077e9be6f2a56cdd6db943d1a1db2c9d701c926 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 9 Apr 2016 00:11:29 +0300 Subject: [PATCH 0605/2314] Scratches the zero --- topics/rediscli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/rediscli.md b/topics/rediscli.md index 0708b64122..28bb0c57a0 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -577,7 +577,7 @@ events of a few millisecond or more. Sometimes it is useful to study how the maximum and average latencies evolve during time. The `--latency-history` option is used for that purpose: it works exactly like `--latency`, but every 15 seconds (by -default) a new sampling session is started beginning with zero: +default) a new sampling session is started from scratch: $ redis-cli --latency-history min: 0, max: 1, avg: 0.14 (1314 samples) -- 15.01 seconds range From 9d8c552f4d96ba436e06be63b6a514054ced918a Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sun, 10 Apr 2016 18:39:18 +0100 Subject: [PATCH 0606/2314] fixed parameter message for PING command --- commands.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/commands.json b/commands.json index 83ad0b55c3..56fe8162de 100644 --- a/commands.json +++ b/commands.json @@ -1676,6 +1676,13 @@ }, "PING": { "summary": "Ping the server", + "arguments": [ + { + "name": "message", + "type": "string", + "optional": true + } + ], "since": "1.0.0", "group": "connection" }, From 02e540c7d3e62a50a3d080fb5153274193cce175 Mon Sep 17 00:00:00 2001 From: Scott Byrns Date: Mon, 11 Apr 2016 01:46:47 -0600 Subject: [PATCH 0607/2314] Improving Zewo Redis description #709 --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index ce85e3af8e..578aa3582d 100644 --- a/clients.json +++ b/clients.json @@ -1242,7 +1242,7 @@ "name": "Redis", "language": "Swift", "repository": "https://github.com/Zewo/Redis", - "description": "Redis client for Swift", + 
"description": "Redis client for Swift. OpenSwift C7 Compliant, OS X and Linux compatible.", "authors": ["rabc"], "active": true }, From a1913c933bc07c56622aff8351564db4679f9277 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 16 Apr 2016 00:33:18 +0300 Subject: [PATCH 0608/2314] Adds the `clear` REPL command --- topics/rediscli.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/topics/rediscli.md b/topics/rediscli.md index 28bb0c57a0..2bcabe2164 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -361,6 +361,10 @@ For example in order to show help for the `PFADD` command, use: Note that `help` supports TAB completion as well. +## Clearing the terminal screen + +Using the `clear` command in interactive mode clears the terminal's screen. + Special modes of operation === From 728abe06e33db5f5770c60b576d2ccee5f2b818f Mon Sep 17 00:00:00 2001 From: quiver Date: Tue, 19 Apr 2016 04:42:47 +0900 Subject: [PATCH 0609/2314] Fix typo topics/benchmarks (#712) --- topics/benchmarks.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/benchmarks.md b/topics/benchmarks.md index 2498e658e0..e1f034eb81 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -16,7 +16,7 @@ The following options are supported: -c Number of parallel connections (default 50) -n Total number of requests (default 100000) -d Data size of SET/GET value in bytes (default 2) - -dbnum SELECT the specified db number (default 0) + --dbnum SELECT the specified db number (default 0) -k 1=keep alive 0=reconnect (default 1) -r Use random keys for SET/GET/INCR, random values for SADD Using this option the benchmark will expand the string __rand_int__ From 0283beb3f43572a23685ee6a038771fb81a3f85e Mon Sep 17 00:00:00 2001 From: Damien Krotkine Date: Tue, 19 Apr 2016 17:41:21 +0200 Subject: [PATCH 0610/2314] Update Redis perl module maintainer twitter handle --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json 
index 578aa3582d..4092fdd584 100644 --- a/clients.json +++ b/clients.json @@ -299,7 +299,7 @@ "url": "http://search.cpan.org/dist/Redis/", "repository": "https://github.com/PerlRedis/perl-redis", "description": "Perl binding for Redis database", - "authors": ["pedromelo"], + "authors": ["damsieboy"], "recommended": true, "active": true }, From 3feed63b02127ef47d619d2f98b6887ec1d58feb Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 22 Apr 2016 08:22:53 +0200 Subject: [PATCH 0611/2314] Add anti slavery message into SLAVEOF man page. --- commands/slaveof.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/commands/slaveof.md b/commands/slaveof.md index 80b7f28d98..26def63217 100644 --- a/commands/slaveof.md +++ b/commands/slaveof.md @@ -18,3 +18,7 @@ slave. @return @simple-string-reply + +**A note about slavery**: it's unfortunate that originally the master-slave terminology was picked for databases. When Redis was designed the existing terminology was used without much analysis of alternatives, however a **SLAVEOF NO ONE** command was added as a freedom message. Instead of changing the terminology, that would require breaking backward compatible in the API and `INFO` output, we want to use this page to remember you about slavery, **a crime against humanity now** but something that was perpetuated [throughout the whole human history](https://en.wikipedia.org/wiki/Slavery). + +*If slavery is not wrong, nothing is wrong.* -- Abraham Lincoln From af994c996872e986dd81c71835eeb1f19182ee46 Mon Sep 17 00:00:00 2001 From: Michael Denny Date: Fri, 22 Apr 2016 14:19:08 +0200 Subject: [PATCH 0612/2314] fix typo link topics/quickstart (#714) --- topics/admin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/admin.md b/topics/admin.md index 42d5c95527..0fd8cfd4b6 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -16,7 +16,7 @@ Redis setup hints + Use `daemonize no` when run under daemontools. 
+ Even if you have persistence disabled, Redis will need to perform RDB saves if you use replication, unless you use the new diskless replication feature, which is currently experimental. + If you are using replication, make sure that either your master has persistence enabled, or that it does not automatically restarts on crashes: slaves will try to be an exact copy of the master, so if a master restarts with an empty data set, slaves will be wiped as well. -+ By default Redis does not require **any authentication and listens to all the network interfaces**. This is a big security issue if you leave Redis exposed on the internet or other places where attackers can reach it. See for example [this attack](http://antirez.com/news/96) to see how dangerous it can be. Please check our [security page](/topics/security) and the [quick start](/topic/quickstart) for information about how to secure Redis. ++ By default Redis does not require **any authentication and listens to all the network interfaces**. This is a big security issue if you leave Redis exposed on the internet or other places where attackers can reach it. See for example [this attack](http://antirez.com/news/96) to see how dangerous it can be. Please check our [security page](/topics/security) and the [quick start](/topics/quickstart) for information about how to secure Redis. Running Redis on EC2 -------------------- From df71b933e3eadb38a2eee2e70b60e8bf6bdf2e24 Mon Sep 17 00:00:00 2001 From: Scott Pabin Date: Sat, 23 Apr 2016 22:29:36 -0700 Subject: [PATCH 0613/2314] Grammatical improvements --- topics/cluster-tutorial.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index b129cd17a7..28d72a059a 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -213,7 +213,7 @@ and stops accepting writes. 
Redis Cluster configuration parameters === -We are about to create an example cluster deployment. Before to continue +We are about to create an example cluster deployment. Before we continue, let's introduce the configuration parameters that Redis Cluster introduces in the `redis.conf` file. Some will be obvious, others will be more clear as you continue reading. @@ -228,13 +228,13 @@ as you continue reading. Creating and using a Redis Cluster === -Note: to deploy a Redis Cluster manually is **very important to learn** certain -operation aspects of it. However if you want to get a cluster up and running -ASAP skip this section and the next one and go directly to **Creating a Redis Cluster using the create-cluster script**. +Note: to deploy a Redis Cluster manually it is **very important to learn** certain +operational aspects of it. However if you want to get a cluster up and running +ASAP (As Soon As Possible) skip this section and the next one and go directly to **Creating a Redis Cluster using the create-cluster script**. To create a cluster, the first thing we need is to have a few empty Redis instances running in **cluster mode**. This basically means that -clusters are not created using normal Redis instances, but a special mode +clusters are not created using normal Redis instances as a special mode needs to be configured so that the Redis instance will enable the Cluster specific features and commands. @@ -250,8 +250,8 @@ appendonly yes As you can see what enables the cluster mode is simply the `cluster-enabled` directive. Every instance also contains the path of a file where the -configuration for this node is stored, that by default is `nodes.conf`. -This file is never touched by humans, it is simply generated at startup +configuration for this node is stored, which by default is `nodes.conf`. +This file is never touched by humans; it is simply generated at startup by the Redis Cluster instances, and updated every time it is needed. 
Note that the **minimal cluster** that works as expected requires to contain @@ -325,9 +325,9 @@ to create the new cluster. Obviously the only setup with our requirements is to create a cluster with 3 masters and 3 slaves. -Redis-trib will propose you a configuration. Accept typing **yes**. -The cluster will be configured and *joined*, that means, instances will be -bootstrapped into talking with each other. Finally if everything went ok +Redis-trib will propose you a configuration. Accept the proposed configuration by typing **yes**. +The cluster will be configured and *joined*, which means, instances will be +bootstrapped into talking with each other. Finally, if everything went well, you'll see a message like that: [OK] All 16384 slots covered From 1b293c376a479f147edce53df840d768d6fac878 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 4 May 2016 19:05:07 +0200 Subject: [PATCH 0614/2314] BITFIELD doc. --- commands.json | 40 +++++++++++++++++++++ commands/bitfield.md | 84 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 commands/bitfield.md diff --git a/commands.json b/commands.json index 56fe8162de..15750a4183 100644 --- a/commands.json +++ b/commands.json @@ -53,6 +53,46 @@ "since": "2.6.0", "group": "string" }, + "BITFIELD": { + "summary": "Perform arbitrary bitfield integer operations on strings", + "complexity": "O(1) for each subcommand specified", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "command": "GET", + "name": ["type", "offset"], + "type": ["type", "integer"], + "optional": true, + "multiple": true + }, + { + "command": "SET", + "name": ["type", "offset", "value"], + "type": ["type", "integer", "integer"], + "optional": true, + "multiple": true + }, + { + "command": "INCRBY", + "name": ["type", "offset", "increment"], + "type": ["type", "integer", "integer"], + "optional": true, + "multiple": true + }, + { + "command": "OVERFLOW", + "type": "enum", + "enum": ["WRAP", "SAT", 
"FAIL"], + "optional": true, + "multiple": true + } + ], + "since": "3.2.0", + "group": "string" + }, "BITOP": { "summary": "Perform bitwise operations between strings", "complexity": "O(N)", diff --git a/commands/bitfield.md b/commands/bitfield.md new file mode 100644 index 0000000000..b659db4051 --- /dev/null +++ b/commands/bitfield.md @@ -0,0 +1,84 @@ +The command treats a Redis string as a bit of arrays, and is capable of addressing specific integer fields of varying bit widths and arbitrary non (necessary) aligned offset. In practical terms using this command you can set, for example, a signed 5 bits integer at bit offset 1234 to a specific value, retrieve a 31 bit unsigned integer from offset 4567. Similarly the command handles increments and decrements of the specified integers, providing guaranteed and well specified overflow and underflow behavior that the user can configure. + +`BITFIELD` is able to operate with multiple bit fields in the same command call. It takes a list of operations to perform, and returns an array of replies, where each array matches the corresponding operation in the list of arguments. + +For example the following command increments an 8 bit signed integer at bit offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0: + + > BITFIELD mykey INCRBY i5 100 1 GET u4 0 + 1) (integer) 1 + 2) (integer) 0 + +Note that: + +1. Addressing with `GET` bits outside the current string length (including the case the key does not exist at all), results in the operation to be performed like the missing part all consists of bits set to 0. +2. Addressing with `SET` or `INCRBY` bits outside the current string length will enlarge the string, zero-padding it, as needed, for the minimal length needed, according to the most far bit touched. + +## Supported subcommands and integer types + +The following is the list of supported commands. + +* **GET** `` `` -- Returns the specified bit field. 
+* **SET** `` `` `` -- Set the specified bit field and returns its old value. +* **INCRBY** `` `` `` -- Increments or decrements (if a negative increment is given) the specified bit field and returns the new value. + +There is another subcommand that only changes the behavior of successive +`INCRBY` subcommand calls by setting the overflow behavior: + +* **OVERFLOW** `[WRAP|SAT|FAIL]` + +Where an integer type is expected, it can be composed by prefixing with `i` for signed integers and `u` for unsigned integers with the number of bits of our integer type. So for example `u8` is an unsigned integer of 8 bits and `i16` is a +signed integer of 16 bits. + +The supported types are up to 64 bits for signed integers, and up to 63 bits for +unsigned integers. This limitation with unsigned integers is due to the fact +that currently the Redis protocol is unable to return 64 bit unsigned integers +as replies. + +## Overflow control + +Using the `OVERFLOW` command the user is able to fin-tune the behavior of +the increment or decrement overflow (or underflow) by specifying one of +the following behaviors: + +* **WRAP**: wrap around, both with signed and unsigned integers. In the case of unsigned integers to wrap is like to perform the operation modulo the maximum value the integer can contain (the C standard behavior). With signed integers instead the wrapping means that overflows restart towards the most negative value and underflows towards the most positive ones, so for example if an `i8` integer is set to the value 127, incrementing it by 1 will yield `-128`. +* **SAT**: uses saturation arithmetic, that is, on underflows the value is set to the minimum integer value, and on overflows to the maximum integer value. For example incrementing an `i8` integer starting from value 120 with an increment of 10, will result into the value 127, and further increments will always keep the value at 127. 
The same happens on underflows, but towards the value is blocked at the most negative value. +* **FAIL**: in this mode no operation is performed on overflows or underflows detected. The corresponding return value is set to NULL to signal the condition to the caller. + +Note that each `OVERFLOW` statement only affects the `INCRBY` commands +that follow it in the list of subcommands, up to the next `OVERFLOW` +statement. + +By default, **WRAP** is used if not otherwise specified. + + > BITFIELD mykey incrby u2 100 1 OVERFLOW SAT incrby u2 102 1 + 1) (integer) 1 + 2) (integer) 1 + > BITFIELD mykey incrby u2 100 1 OVERFLOW SAT incrby u2 102 1 + 1) (integer) 2 + 2) (integer) 2 + > BITFIELD mykey incrby u2 100 1 OVERFLOW SAT incrby u2 102 1 + 1) (integer) 3 + 2) (integer) 3 + > BITFIELD mykey incrby u2 100 1 OVERFLOW SAT incrby u2 102 1 + 1) (integer) 0 + 2) (integer) 3 + +## Return value + +The command returns an array with each entry being the corresponding result of +the sub command given at the same position. `OVERFLOW` subcommands don't count +as generating a reply. + +The following is an example of `OVERFLOW FAIL` returning NULL. + + > BITFIELD mykey OVERFLOW FAIL incrby u2 102 1 + 1) (nil) + +## Motivations + +The motivation for this command is that the ability to store many small integers +as a single large bitmap (or segmented over a few keys to avoid having huge keys) is extremely memory efficient, and opens new use cases for Redis to be applied, especially in the field of real time analytics. This use cases are supported by the ability to specify the overflow in a controlled way. + +## Performance considerations + +Usually `BITFIELD` is a fast command, however note that addressing far bits of currently short strings will trigger an allocation that may be more costly than executing the command on bits already existing. 
From 604d6025c536dc0af2fbf9bf6a5115a583f21a7a Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 4 May 2016 19:07:29 +0200 Subject: [PATCH 0615/2314] Hopefully better rendering of BITFIELD command synopsis. --- commands.json | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/commands.json b/commands.json index 15750a4183..825a2800fd 100644 --- a/commands.json +++ b/commands.json @@ -65,29 +65,25 @@ "command": "GET", "name": ["type", "offset"], "type": ["type", "integer"], - "optional": true, - "multiple": true + "optional": true }, { "command": "SET", "name": ["type", "offset", "value"], "type": ["type", "integer", "integer"], - "optional": true, - "multiple": true + "optional": true }, { "command": "INCRBY", "name": ["type", "offset", "increment"], "type": ["type", "integer", "integer"], - "optional": true, - "multiple": true + "optional": true }, { "command": "OVERFLOW", "type": "enum", "enum": ["WRAP", "SAT", "FAIL"], - "optional": true, - "multiple": true + "optional": true } ], "since": "3.2.0", From fa078bea3dbf2256bffde3d48874df36d6b0945b Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 6 May 2016 12:53:06 +0200 Subject: [PATCH 0616/2314] BITFIELD doc improved --- commands/bitfield.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/commands/bitfield.md b/commands/bitfield.md index b659db4051..7d34c607d4 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -34,6 +34,21 @@ unsigned integers. This limitation with unsigned integers is due to the fact that currently the Redis protocol is unable to return 64 bit unsigned integers as replies. +## Bits and positional offsets + +There are two ways in order to specify offsets in the bitfield command. +If a number without any prefix is specified, it is used just as a zero based +bit offset inside the string. 
+ +However if the offset is prefixed with a `#` character, the specified offset +is multiplied by the integer type width, so for example: + + BITFIELD mystring SET i8 #0 100 i8 #1 200 + +Will set the first i8 integer at offset 0 and the second at offset 8. +This way you don't have to do the math yourself inside your client if what +you want is a plain array of integers of a given size. + ## Overflow control Using the `OVERFLOW` command the user is able to fin-tune the behavior of @@ -82,3 +97,18 @@ as a single large bitmap (or segmented over a few keys to avoid having huge keys ## Performance considerations Usually `BITFIELD` is a fast command, however note that addressing far bits of currently short strings will trigger an allocation that may be more costly than executing the command on bits already existing. + +## Orders of bits + +The representation used by `BITFIELD` considers the bitmap as having the +bit number 0 to be the most significant bit of the first byte, and so forth, so +for example setting a 5 bits unsigned integer to value 23 at offset 7 into a +bitmap previously set to all zeroes, will produce the following representation: + + +--------+--------+ + |00000001|01110000| + +--------+--------+ + +When offsets and integer sizes are aligned to bytes boundaries, this is the +same as big endian, however when such alignment does not exist, its important +to also understand how the bits inside a byte are ordered. From 40747787c1ab7e9cbd1a7c5504048cc20c896809 Mon Sep 17 00:00:00 2001 From: Tomasz Rekawek Date: Fri, 6 May 2016 14:28:50 +0200 Subject: [PATCH 0617/2314] Fix a minor error in the BITFIELD doc BITFIELD allows to treat a string as a "array of bits" rather than "bit of arrays". 
--- commands/bitfield.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/bitfield.md b/commands/bitfield.md index 7d34c607d4..d67cfb79d6 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -1,4 +1,4 @@ -The command treats a Redis string as a bit of arrays, and is capable of addressing specific integer fields of varying bit widths and arbitrary non (necessary) aligned offset. In practical terms using this command you can set, for example, a signed 5 bits integer at bit offset 1234 to a specific value, retrieve a 31 bit unsigned integer from offset 4567. Similarly the command handles increments and decrements of the specified integers, providing guaranteed and well specified overflow and underflow behavior that the user can configure. +The command treats a Redis string as a array of bits, and is capable of addressing specific integer fields of varying bit widths and arbitrary non (necessary) aligned offset. In practical terms using this command you can set, for example, a signed 5 bits integer at bit offset 1234 to a specific value, retrieve a 31 bit unsigned integer from offset 4567. Similarly the command handles increments and decrements of the specified integers, providing guaranteed and well specified overflow and underflow behavior that the user can configure. `BITFIELD` is able to operate with multiple bit fields in the same command call. It takes a list of operations to perform, and returns an array of replies, where each array matches the corresponding operation in the list of arguments. 
From a0f35c25766582e6b88faa86b2d6353fb6b9dc43 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sat, 7 May 2016 01:08:59 +0100 Subject: [PATCH 0618/2314] Fixed STORE and STOREDIST params for GEORADIUS and GEORADIUSBYMEMBER --- commands.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/commands.json b/commands.json index 825a2800fd..67afdba1bc 100644 --- a/commands.json +++ b/commands.json @@ -888,6 +888,18 @@ "type": "enum", "enum": ["ASC", "DESC"], "optional": true + }, + { + "command": "STORE", + "name": "key", + "type": "key", + "optional": true + }, + { + "command": "STOREDIST", + "name": "key", + "type": "key", + "optional": true } ], "group": "geo" @@ -942,6 +954,18 @@ "type": "enum", "enum": ["ASC", "DESC"], "optional": true + }, + { + "command": "STORE", + "name": "key", + "type": "key", + "optional": true + }, + { + "command": "STOREDIST", + "name": "key", + "type": "key", + "optional": true } ], "group": "geo" From f474aab3aeb84fa9317fd9e06c6976a79eee7f6d Mon Sep 17 00:00:00 2001 From: minus Date: Tue, 17 May 2016 21:02:57 +0200 Subject: [PATCH 0619/2314] Added version info for GEO* The commands should not show up as beta anymore. Also simplified the GEODEL note. 
--- commands.json | 6 ++++++ commands/geoadd.md | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 67afdba1bc..5eb3a19011 100644 --- a/commands.json +++ b/commands.json @@ -776,6 +776,7 @@ "multiple": true } ], + "since": "3.2.0", "group": "geo" }, "GEOHASH": { @@ -792,6 +793,7 @@ "multiple": true } ], + "since": "3.2.0", "group": "geo" }, "GEOPOS": { @@ -808,6 +810,7 @@ "multiple": true } ], + "since": "3.2.0", "group": "geo" }, "GEODIST": { @@ -832,6 +835,7 @@ "optional": true } ], + "since": "3.2.0", "group": "geo" }, "GEORADIUS": { @@ -902,6 +906,7 @@ "optional": true } ], + "since": "3.2.0", "group": "geo" }, "GEORADIUSBYMEMBER": { @@ -968,6 +973,7 @@ "optional": true } ], + "since": "3.2.0", "group": "geo" }, "GET": { diff --git a/commands/geoadd.md b/commands/geoadd.md index e7d42b8f47..e2a73fdd9e 100644 --- a/commands/geoadd.md +++ b/commands/geoadd.md @@ -11,7 +11,7 @@ limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: The command will report an error when the user attempts to index coordinates outside the specified ranges. -**Note:** this command has no a symmetric **GEODEL** command simply because you can use `ZREM` in order to remove elements from the sorted set, and the Geo index structure is just a sorted set. +**Note:** there is no **GEODEL** command because you can use `ZREM` in order to remove elements. The Geo index structure is just a sorted set. How does it work? 
--- From 76773445022f12004b08d80b10cfcf544d7818e6 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 3 Jun 2016 03:18:23 +0300 Subject: [PATCH 0620/2314] Adds spark-redis --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 4092fdd584..1c98ea7450 100644 --- a/clients.json +++ b/clients.json @@ -1379,5 +1379,15 @@ "description": "A minimalistic Redis client using modern Node.js.", "authors": ["djanowski"], "active": true + }, + + { + "name": "spark-redis", + "language": "Scala", + "repository": "https://github.com/redislabs/spark-redis", + "description": "A connector between Apache Spark and Redis.", + "authors": ["redislabs", "sunheehnus", "dvirsky"], + "active": true } + ] From 476cd7416d9708ee2679a9aa7d20df900477a95f Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 7 Jun 2016 18:54:42 +0200 Subject: [PATCH 0621/2314] Added a ZRANGEBYSCORE pattern --- commands/zrangebyscore.md | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/commands/zrangebyscore.md b/commands/zrangebyscore.md index f872a32601..3cbc05723a 100644 --- a/commands/zrangebyscore.md +++ b/commands/zrangebyscore.md @@ -55,3 +55,47 @@ ZRANGEBYSCORE myzset 1 2 ZRANGEBYSCORE myzset (1 2 ZRANGEBYSCORE myzset (1 (2 ``` + +## Pattern: weighted random selection of an element + +Normally `ZRANGEBYSCORE` is simply used in order to get range of items +where the score is the indexed integer key, however it is possible to do less +obvious things with the command. + +For example a common problem when implementing Markov chains and other algorithms +is to select an element at random from a set, but different elements may have +different weights that change how likely it is they are picked. + +This is how we use this command in order to mount such an algorithm: + +Imagine you have elements A, B and C with weights 1, 2 and 3. 
+You compute the sum of the weights, which is 1+2+3 = 6 + +At this point you add all the elements into a sorted set using this algorithm: + +``` +SUM = ELEMENTS.TOTAL_WEIGHT // 6 in this case. +SCORE = 0 +FOREACH ELE in ELEMENTS + SCORE += ELE.weight / SUM + ZADD KEY SCORE ELE +END +``` + +This means that you set: + +``` +A to score 0.16 +B to score .5 +C to score 1 +``` + +Since this involves approximations, in order to avoid C is set to, +like, 0.998 instead of 1, we just modify the above algorithm to make sure +the last score is 1 (left as an exercise for the reader...). + +At this point, each time you want to get a weighted random element, +just compute a random number between 0 and 1 (which is like calling +`rand()` in most languages), so you can just do: + + RANDOM_ELE = ZRANGEBYSCORE key RAND() +inf LIMIT 0 1 From 0441bbafae50eca681bacf6ee46149aebc5bd63e Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Fri, 17 Jun 2016 10:45:37 +0200 Subject: [PATCH 0622/2314] New valid words: bitfield, underflows --- wordlist | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wordlist b/wordlist index 321f73d590..5f2676ec0e 100644 --- a/wordlist +++ b/wordlist @@ -156,6 +156,7 @@ backtrace benchmarked benchmarking bgsave +bitfield bitop bitwise blazingly @@ -359,6 +360,7 @@ trib tuple tuples unary +underflows unencrypted unguessable unix From 43d13bf31f458ded5b36592eb686ea6a1e0f8440 Mon Sep 17 00:00:00 2001 From: eyjian Date: Fri, 17 Jun 2016 18:53:52 +0800 Subject: [PATCH 0623/2314] add r3c, a Redis Cluster C++ Client (#733) * add r3c, a Redis Cluster C++ Client * change twitter account --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 1c98ea7450..05cfc09da2 100644 --- a/clients.json +++ b/clients.json @@ -1388,6 +1388,15 @@ "description": "A connector between Apache Spark and Redis.", "authors": ["redislabs", "sunheehnus", "dvirsky"], "active": true + }, + + { + "name": "r3c", + "language": "C++", + 
"repository": "https://github.com/eyjian/r3c", + "description": "A Redis Cluster C++ Client, based on hiredis and support standalone, it's easy to make and use, not depends on C++11 or later.", + "authors": ["eyjian"], + "active": true } ] From 63010250409da1889f19c384f076e661bd42b76f Mon Sep 17 00:00:00 2001 From: Alexey Popravka Date: Sun, 29 May 2016 12:44:31 +0300 Subject: [PATCH 0624/2314] Note on RENAME command changes in 3.2 --- commands/rename.md | 5 +++-- commands/renamenx.md | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/commands/rename.md b/commands/rename.md index 86cb593175..a86f001c03 100644 --- a/commands/rename.md +++ b/commands/rename.md @@ -1,8 +1,9 @@ Renames `key` to `newkey`. -It returns an error when the source and destination names are the same, or when -`key` does not exist. +It returns an error when `key` does not exist. If `newkey` already exists it is overwritten, when this happens `RENAME` executes an implicit `DEL` operation, so if the deleted key contains a very big value it may cause high latency even if `RENAME` itself is usually a constant-time operation. +**Note:** Before Redis 3.2.0, an error is returned if source and destination names are the same. + @return @simple-string-reply diff --git a/commands/renamenx.md b/commands/renamenx.md index 4823887fa1..8fa6395b96 100644 --- a/commands/renamenx.md +++ b/commands/renamenx.md @@ -1,5 +1,7 @@ Renames `key` to `newkey` if `newkey` does not yet exist. -It returns an error under the same conditions as `RENAME`. +It returns an error when `key` does not exist. + +**Note:** Before Redis 3.2.0, an error is returned if source and destination names are the same. 
@return From 90ffc3476873f319048bcf105fe00290fd836f10 Mon Sep 17 00:00:00 2001 From: Jeff Fraser Date: Fri, 17 Jun 2016 10:44:43 -0400 Subject: [PATCH 0625/2314] Spelling and grammar edits in Overflow Control (#721) --- commands/bitfield.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/bitfield.md b/commands/bitfield.md index d67cfb79d6..6e6fc28474 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -51,11 +51,11 @@ you want is a plain array of integers of a given size. ## Overflow control -Using the `OVERFLOW` command the user is able to fin-tune the behavior of +Using the `OVERFLOW` command the user is able to fine-tune the behavior of the increment or decrement overflow (or underflow) by specifying one of the following behaviors: -* **WRAP**: wrap around, both with signed and unsigned integers. In the case of unsigned integers to wrap is like to perform the operation modulo the maximum value the integer can contain (the C standard behavior). With signed integers instead the wrapping means that overflows restart towards the most negative value and underflows towards the most positive ones, so for example if an `i8` integer is set to the value 127, incrementing it by 1 will yield `-128`. +* **WRAP**: wrap around, both with signed and unsigned integers. In the case of unsigned integers, wrapping is like performing the operation modulo the maximum value the integer can contain (the C standard behavior). With signed integers instead wrapping means that overflows restart towards the most negative value and underflows towards the most positive ones, so for example if an `i8` integer is set to the value 127, incrementing it by 1 will yield `-128`. * **SAT**: uses saturation arithmetic, that is, on underflows the value is set to the minimum integer value, and on overflows to the maximum integer value. 
For example incrementing an `i8` integer starting from value 120 with an increment of 10, will result into the value 127, and further increments will always keep the value at 127. The same happens on underflows, but towards the value is blocked at the most negative value. * **FAIL**: in this mode no operation is performed on overflows or underflows detected. The corresponding return value is set to NULL to signal the condition to the caller. From 4b1a123650d68cc6963dad49710ace823440917d Mon Sep 17 00:00:00 2001 From: John Whitbeck Date: Fri, 17 Jun 2016 09:14:12 -0700 Subject: [PATCH 0626/2314] Add Java rdb-parser tool (#730) * Add Java rdb-parser tool * Remove Java rdb-parser author I don't have a twitter account. --- tools.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools.json b/tools.json index 2c7b9cd81c..6f581be13f 100644 --- a/tools.json +++ b/tools.json @@ -569,5 +569,11 @@ "repository": "https://github.com/leegould/RedisExplorer", "description": "Windows desktop GUI client", "authors": ["leegould"] + }, + { + "name": "rdb-parser", + "language": "Java", + "repository": "https://github.com/jwhitbeck/java-rdb-parser", + "description": "A simple Redis RDB file parser for Java" } ] From b46b8c0cef0a5a014b063c8bca0eb2ddf8ce15f7 Mon Sep 17 00:00:00 2001 From: Arseniy Pavlenko Date: Sat, 18 Jun 2016 22:37:05 +0300 Subject: [PATCH 0627/2314] Small error in example (#719) * Small error in example 075 and 200 0 2 7 0 5 0 027050 not 027005 * Update indexes.md 001[00]1011 - 9 bit 011[00]1000 - 9 bit should be four zeroes and 18 bit result, no? 0001110011[0000]1010 * Right interlace is 000111000011001010 001[00]1011 011[00]1000 000111[0000]11001010 --- topics/indexes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index 0b58ef1f7c..ae2c4b1c6f 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -574,7 +574,7 @@ context of range queries. 
For example let's take the center of our blue box, which is at `x=75` and `y=200`. We can encode this number as we did earlier by interleaving the digits, obtaining: - 027005 + 027050 What happens if we substitute the last two digits respectively with 00 and 99? We obtain a range which is lexicographically continue: @@ -617,7 +617,7 @@ Our numbers in binary form, assuming we need just 9 bits for each variable So by interleaving digits, our representation in the index would be: - 0001110011001010:75:200 + 000111000011001010:75:200 Let's see what are our ranges as we substitute the last 2, 4, 6, 8, ... bits with 0s ad 1s in the interleaved representation: From 6efd290ad841b89a5e334d2fb2c8aa36f3a93a84 Mon Sep 17 00:00:00 2001 From: Arseniy Pavlenko Date: Tue, 21 Jun 2016 12:59:45 +0300 Subject: [PATCH 0628/2314] Should be in Node.js section not in Javascript (#735) --- clients.json | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/clients.json b/clients.json index 05cfc09da2..3ce8ccc3e3 100644 --- a/clients.json +++ b/clients.json @@ -720,6 +720,24 @@ "recommended": true, "active": true }, + + { + "name": "redis-fast-driver", + "language": "Node.js", + "repository": "https://github.com/h0x91b/redis-fast-driver", + "description": "Driver based on hiredis async lib, can do PUBSUB and MONITOR, simple and really fast, written with NaN so works fine with node >=0.8", + "authors": ["h0x91b"], + "active": true + }, + + { + "name": "fast-redis-cluster", + "language": "Node.js", + "repository": "https://github.com/h0x91b/fast-redis-cluster", + "description": "Simple and fast cluster driver with error handling, uses redis-fast-driver as main adapter and node_redis as backup for windows", + "authors": ["h0x91b"], + "active": true + }, { "name": "iodis", @@ -1194,22 +1212,6 @@ "active": true }, - { - "name": "redis-fast-driver", - "language": "Javascript", - "repository": 
"https://github.com/h0x91b/redis-fast-driver", - "description": "Driver based on hiredis async lib, can do PUBSUB and MONITOR, simple and really fast, written with NaN so works fine with node 0.8, 0.10 and 0.12", - "authors": [] - }, - - { - "name": "fast-redis-cluster", - "language": "Javascript", - "repository": "https://github.com/h0x91b/fast-redis-cluster", - "description": "Simple and fast cluster driver with error handling, uses redis-fast-driver as main adapter and node_redis as backup for windows", - "authors": [] - }, - { "name": "Redis::Cluster", "language": "Perl", From b484e4ab823c204bc747bfc185517680f23d1019 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 22 Jun 2016 15:35:49 +0200 Subject: [PATCH 0629/2314] Clarify how expires are replicated. --- topics/replication.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/topics/replication.md b/topics/replication.md index 1237a2d432..7f18e27f2f 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -198,3 +198,23 @@ There are two configuration parameters for this feature: For more information, please check the example `redis.conf` file shipped with the Redis source distribution. + +How Redis replication deals with expires on keys +--- + +Redis expires allow keys to have a limited time to live. Such a feature depends +on the ability of an instance to count the time, however Redis slaves correctly +replicate keys with expires, even when such keys are altered using Lua +scripts. + +To implement such a feature Redis cannot rely on the ability of the master and +slave to have synchronized clocks, since this is a problem that cannot be solved +and would result into race conditions and diverging data sets, so Redis +uses three main techniques in order to make the replication of expired keys +able to work: + +1. Slaves don't expire keys, instead they wait for masters to expire the keys. 
When a master expires a key (or evict it because of LRU), it synthesizes a `DEL` command which is transmitted to all the slaves. +2. However because of master-driven expire, sometimes slaves may still have in memory keys that are already logically expired, since the master was not able to provide the `DEL` command in time. In order to deal with that the slave uses its logical clock in order to report that a key does not exist **only for read operations** that don't violate the consistency of the data set (as new commands from the master will arrive). In this way slaves avoid to report logically expired keys are still existing. In practical terms, an HTML fragments cache that uses slaves to scale will avoid returning items that are already older than the desired time to live. +3. During Lua scripts executions no keys expires are performed. As a Lua script runs, conceptually the time in the master is frozen, so that a given key will either exist or not for all the time the script runs. This prevents keys to expire in the middle of a script, and is needed in order to send the same script to the slave in a way that is guaranteed to have the same effects in the data set. + +As it is expected, once a slave is turned into a master because of a fail over, it start to expire keys in an independent way without requiring help from its old master. From 870bb60b601fda4406d7f93912f8936dfcad3d97 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Wed, 22 Jun 2016 21:47:32 -0300 Subject: [PATCH 0630/2314] Add words. 
--- wordlist | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wordlist b/wordlist index 5f2676ec0e..a996223e30 100644 --- a/wordlist +++ b/wordlist @@ -144,6 +144,7 @@ addr afterwards allkeys allocator +analytics antirez aof appendfsync @@ -172,6 +173,7 @@ checksum chrt cli cmsgpack +codename commandstats conf config From 3141df4ef1dc946253aa9c8b9d0be16f8230b7a2 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Wed, 22 Jun 2016 22:16:24 -0300 Subject: [PATCH 0631/2314] Fix authors in clients.json. --- clients.json | 13 ++++++++----- makefile | 2 +- utils/clients.rb | 6 ++++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/clients.json b/clients.json index 3ce8ccc3e3..c4bc3cb88a 100644 --- a/clients.json +++ b/clients.json @@ -720,7 +720,7 @@ "recommended": true, "active": true }, - + { "name": "redis-fast-driver", "language": "Node.js", @@ -914,7 +914,8 @@ "name": "redis-client", "language": "Rust", "repository": "https://github.com/AsoSunag/redis-client", - "description": "A Redis client library for Rust." + "description": "A Redis client library for Rust.", + "authors": [] }, { @@ -1012,6 +1013,7 @@ "language": "OCaml", "repository": "https://github.com/0xffea/ocaml-redis", "description": "Synchronous and asynchronous (via Lwt) Redis client library in OCaml. 
Provides implementation of cache and mutex helpers.", + "authors": [], "active": true }, { @@ -1027,7 +1029,8 @@ "name": "finagle-redis", "language": "Scala", "repository": "https://github.com/twitter/finagle/tree/develop/finagle-redis", - "description": "Redis client based on Finagle" + "description": "Redis client based on Finagle", + "authors": [] }, { @@ -1356,7 +1359,7 @@ "authors": ["danieleteti"], "active": true }, - + { "name": "eredis", "language": "C", @@ -1373,7 +1376,7 @@ "description": "High-performance Erlang client for the Redis key-value store (NIF wrapping the hiredis C client).", "authors": ["funbox_team"] }, - + { "name": "yoredis", "language": "Node.js", diff --git a/makefile b/makefile index 44ff6e5745..01f3251b4f 100644 --- a/makefile +++ b/makefile @@ -1,5 +1,5 @@ MD_FILES:=$(shell find commands -name '*.md') -JSON_FILES:=$(shell find commands -name '*.json') +JSON_FILES:=$(shell find . -name '*.json') TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) diff --git a/utils/clients.rb b/utils/clients.rb index 858e20a072..6832ea76f1 100644 --- a/utils/clients.rb +++ b/utils/clients.rb @@ -45,8 +45,10 @@ def check check_url(@client[:repository]) end - Array(@client[:authors]).each do |author| - check_author(author) + if assert(@client[:authors], "authors is null") + @client[:authors].each do |author| + check_author(author) + end end end From 4775c0d2d13a434b6e10cbd40297dca0bcbf4414 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Wed, 22 Jun 2016 22:19:55 -0300 Subject: [PATCH 0632/2314] Fix tools.json. 
--- tools.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools.json b/tools.json index 6f581be13f..d55d543440 100644 --- a/tools.json +++ b/tools.json @@ -574,6 +574,7 @@ "name": "rdb-parser", "language": "Java", "repository": "https://github.com/jwhitbeck/java-rdb-parser", - "description": "A simple Redis RDB file parser for Java" + "description": "A simple Redis RDB file parser for Java", + "authors": [] } ] From 5958f9dc7d30e1275bcd2c6e1fbcd10556941c1f Mon Sep 17 00:00:00 2001 From: minus Date: Thu, 30 Jun 2016 18:17:59 +0200 Subject: [PATCH 0633/2314] Updated Nim client info (#738) --- clients.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients.json b/clients.json index c4bc3cb88a..a1676ad569 100644 --- a/clients.json +++ b/clients.json @@ -957,8 +957,7 @@ { "name": "redis", "language": "Nim", - "repository": "https://github.com/nim-lang/Nim/blob/devel/lib/pure/redis.nim", - "url": "http://nim-lang.org/docs/redis.html", + "repository": "https://github.com/nim-lang/redis", "description": "Redis client for Nim", "authors": [], "active": true From b65c1cd84d2db8856846adacfbc4cf823cdc5ea6 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 6 Jul 2016 15:41:38 +0200 Subject: [PATCH 0634/2314] Protected mode explained in the security page. --- topics/security.md | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/topics/security.md b/topics/security.md index 851017bf2d..b058f3da34 100644 --- a/topics/security.md +++ b/topics/security.md @@ -51,8 +51,25 @@ like the following to the **redis.conf** file: bind 127.0.0.1 Failing to protect the Redis port from the outside can have a big security -impact because of the nature of Redis. For instance, a single **FLUSHALL** command -can be used by an external attacker to delete the whole data set. +impact because of the nature of Redis. 
For instance, a single **FLUSHALL** command can be used by an external attacker to delete the whole data set. + +Protected mode +--- + +Unfortunately many users fail to protect Redis instances from being accessed +from external networks. Many instances are simply left exposed on the +internet with public IPs. For this reasons since version 3.2.0, when Redis is +executed with the default configuration (binding all the interfaces) and +without any password in order to access it, it enters a special mode called +**proteced mode**. In this mode Redis only replies to queries from the +loopback interfaces, and reply to other clients connecting from other +addresses with an error, explaining what is happening and how to configure +Redis properly. + +We expect protected mode to seriously decrease the security issues caused +by unprotected Redis instances executed without proper administration, however +the system administrator can still ignore the error given by Redis and +just disable protected mode or manually bind all the interfaces. Authentication feature --- @@ -106,9 +123,7 @@ inside the redis.conf configuration file. For example: rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -In the above example, the **CONFIG** command was renamed into an unguessable name. -It is also possible to completely disable it (or any other command) by renaming it -to the empty string, like in the following example: +In the above example, the **CONFIG** command was renamed into an unguessable name. It is also possible to completely disable it (or any other command) by renaming it to the empty string, like in the following example: rename-command CONFIG "" @@ -142,8 +157,7 @@ The protocol uses prefixed-length strings and is completely binary safe. Lua scripts executed by the **EVAL** and **EVALSHA** commands follow the same rules, and thus those commands are also safe. 
-While it would be a very strange use case, the application should avoid composing -the body of the Lua script using strings obtained from untrusted sources. +While it would be a very strange use case, the application should avoid composing the body of the Lua script using strings obtained from untrusted sources. Code security --- From cf88e9534763040012f6f7501fa04a4d0ff4f2cb Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 7 Jul 2016 14:14:20 +0200 Subject: [PATCH 0635/2314] Make clear that [H]INCRBYFLOAT is also good at decrementing. --- commands/hincrbyfloat.md | 4 +++- commands/incrbyfloat.md | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/commands/hincrbyfloat.md b/commands/hincrbyfloat.md index f5cacc3a96..d6eb472597 100644 --- a/commands/hincrbyfloat.md +++ b/commands/hincrbyfloat.md @@ -1,5 +1,6 @@ Increment the specified `field` of a hash stored at `key`, and representing a -floating point number, by the specified `increment`. +floating point number, by the specified `increment`. If the increment value +is negative, the result is to have the hash field value **decremented** instead of incremented. If the field does not exist, it is set to `0` before performing the operation. An error is returned if one of the following conditions occur: @@ -20,6 +21,7 @@ information. ```cli HSET mykey field 10.50 HINCRBYFLOAT mykey field 0.1 +HINCRBYFLOAT mykey field -5 HSET mykey field 5.0e3 HINCRBYFLOAT mykey field 2.0e2 ``` diff --git a/commands/incrbyfloat.md b/commands/incrbyfloat.md index 6f41d77b2a..9efca1d9f7 100644 --- a/commands/incrbyfloat.md +++ b/commands/incrbyfloat.md @@ -1,5 +1,7 @@ Increment the string representing a floating point number stored at `key` by the -specified `increment`. +specified `increment`. By using a negative `increment` value, the result is +that the value stored at the key is decremented (by the obvious properties +of addition). If the key does not exist, it is set to `0` before performing the operation. 
An error is returned if one of the following conditions occur: @@ -30,6 +32,7 @@ regardless of the actual internal precision of the computation. ```cli SET mykey 10.50 INCRBYFLOAT mykey 0.1 +INCRBYFLOAT mykey -5 SET mykey 5.0e3 INCRBYFLOAT mykey 2.0e2 ``` From 80129ef3a0fc0e48aeda73f9c9b790cf7bdb2636 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 7 Jul 2016 15:56:21 +0200 Subject: [PATCH 0636/2314] Document that MONITOR does not show everything. --- commands/monitor.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/commands/monitor.md b/commands/monitor.md index 2fb465129c..6f10a78a7e 100644 --- a/commands/monitor.md +++ b/commands/monitor.md @@ -40,6 +40,11 @@ Connection closed by foreign host. Manually issue the `QUIT` command to stop a `MONITOR` stream running via `telnet`. +## Commands not logged by MONITOR + +For security concerns, certain special administration commands like `CONFIG` +are not logged into the `MONITOR` output. + ## Cost of running `MONITOR` Because `MONITOR` streams back **all** commands, its use comes at a cost. From d299bbadb949b240cccc9fc360d85a305e228305 Mon Sep 17 00:00:00 2001 From: Vivek Agarwal Date: Fri, 15 Jul 2016 21:51:28 +0530 Subject: [PATCH 0637/2314] Fix a typo --- topics/security.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/security.md b/topics/security.md index b058f3da34..7bc153a0d3 100644 --- a/topics/security.md +++ b/topics/security.md @@ -61,7 +61,7 @@ from external networks. Many instances are simply left exposed on the internet with public IPs. For this reasons since version 3.2.0, when Redis is executed with the default configuration (binding all the interfaces) and without any password in order to access it, it enters a special mode called -**proteced mode**. In this mode Redis only replies to queries from the +**protected mode**. 
In this mode Redis only replies to queries from the loopback interfaces, and reply to other clients connecting from other addresses with an error, explaining what is happening and how to configure Redis properly. From 8aa4a38f9b88c6a89ce99287ffcbf38f8bd40b28 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sat, 23 Jul 2016 11:42:28 -0700 Subject: [PATCH 0638/2314] Documents the option --- topics/rediscli.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/topics/rediscli.md b/topics/rediscli.md index 2bcabe2164..255879a334 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -90,6 +90,12 @@ other than the default number zero by using the `-n ` option: $ redis-cli -n 2 incr a (integer) 1 +Some or all of this information can also be provided by using the `-u ` +option and a valid URI: + + $ redis-cli -u redis://p%40ssw0rd@redis-16379.hosted.com:16379/0 ping + PONG + ## Getting input from other programs There are two ways you can use `redis-cli` in order to get the input from other From ca9f45dbc5cceed73ed42be07232fc0107a98ba7 Mon Sep 17 00:00:00 2001 From: Simon Ninon Date: Mon, 25 Jul 2016 12:56:57 +0200 Subject: [PATCH 0639/2314] update cpp_redis description (#744) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index a1676ad569..62fd691c5e 100644 --- a/clients.json +++ b/clients.json @@ -1300,7 +1300,7 @@ "name": "cpp_redis", "language": "C++", "repository": "https://github.com/cylix/cpp_redis", - "description": "Modern C++11 Redis client based on boost::asio", + "description": "C++11 Redis client: async, thread-safe, no dependencies, pipelining.", "authors": ["simon_ninon"], "active": true }, From 9c8c9c7219b0cd55e34252df7998d0dc87557e72 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 28 Jul 2016 13:22:44 +0200 Subject: [PATCH 0640/2314] Replication doc updated. slave-announce-* options documented as well. 
--- topics/replication.md | 75 ++++++++++++++++++++++++++----------------- 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/topics/replication.md b/topics/replication.md index 7f18e27f2f..b0939846ab 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -7,40 +7,38 @@ master servers. The following are some very important facts about Redis replication: * Redis uses asynchronous replication. Starting with Redis 2.8, -however, slaves will periodically acknowledge the amount of data +however, slaves periodically acknowledge the amount of data processed from the replication stream. * A master can have multiple slaves. * Slaves are able to accept connections from other slaves. Aside from connecting a number of slaves to the same master, slaves can also be -connected to other slaves in a graph-like structure. +connected to other slaves in a cascading-like structure. * Redis replication is non-blocking on the master side. This means that the master will continue to handle queries when one or more slaves perform the initial synchronization. -* Replication is also non-blocking on the slave side. While the slave is performing -the initial synchronization, it can handle queries using the old version of +* Replication is also non-blocking on the slave side. While the slave is performing the initial synchronization, it can handle queries using the old version of the dataset, assuming you configured Redis to do so in redis.conf. Otherwise, you can configure Redis slaves to return an error to clients if the replication stream is down. However, after the initial sync, the old dataset must be deleted and the new one must be loaded. The slave will block incoming -connections during this brief window. +connections during this brief window (that can be as long as many seconds for very large datasets). 
* Replication can be used both for scalability, in order to have -multiple slaves for read-only queries (for example, heavy `SORT` +multiple slaves for read-only queries (for example, slow O(N) operations can be offloaded to slaves), or simply for data redundancy. -* It is possible to use replication to avoid the cost of having the master write the full dataset to disk: just configure your master `redis.conf` to avoid saving (just comment all the "save" directives), then connect a slave configured to save from time to time. However in this setup make sure masters don't restart automatically (please read the next section for more information). +* It is possible to use replication to avoid the cost of having the master write the full dataset to disk: a typical technique involves configuring your master `redis.conf` to avoid persisting to disk at all, then connect a slave configured to save from time to time, or with AOF enabled. However this setup must be handled with care, since a restarting master will start with an empty dataset: if the slave tries to synchronized with it, the slave will be emptied as well. Safety of replication when master has persistence turned off --- In setups where Redis replication is used, it is strongly advised to have persistence turned on in the master, or when this is not possible, for example -because of latency concerns, instances should be configured to **avoid restarting -automatically**. +because of latency concerns, instances should be configured to **avoid restarting automatically** after a reboot. To better understand why masters with persistence turned off configured to auto restart are dangerous, check the following failure mode where data @@ -58,14 +56,16 @@ Every time data safety is important, and replication is used with master configu How Redis replication works --- -If you set up a slave, upon connection it sends a SYNC command. It doesn't -matter if it's the first time it has connected or if it's a reconnection. 
+If you set up a slave, upon connection it sends a PSYNC command. -The master then starts background saving, and starts to buffer all new -commands received that will modify the dataset. When the background +If this is a reconnection and the master has enough *backlog*, only the difference (what the slave missed) is sent. Otherwise what is called a *full resynchronization* is triggered. + +When a full resynchronization is triggered, the master starts a background +saving process in order to produce an RDB file. At the same time it starts to +buffer all new write commands received from the clients. When the background saving is complete, the master transfers the database file to the slave, which saves it on disk, and then loads it into memory. The master will -then send to the slave all buffered commands. This is done as a +then send all buffered commands to the slave. This is done as a stream of commands and is in the same format of the Redis protocol itself. You can try it yourself via telnet. Connect to the Redis port while the @@ -73,15 +73,11 @@ server is doing some work and issue the `SYNC` command. You'll see a bulk transfer and then every command received by the master will be re-issued in the telnet session. -Slaves are able to automatically reconnect when the master <-> -slave link goes down for some reason. If the master receives multiple +Slaves are able to automatically reconnect when the master-slave +link goes down for some reason. If the master receives multiple concurrent slave synchronization requests, it performs a single background save in order to serve all of them. -When a master and a slave reconnects after the link went down, a full resync -is always performed. However, starting with Redis 2.8, a partial resynchronization -is also possible. - Partial resynchronization --- @@ -91,16 +87,15 @@ replication link went down. This works by creating an in-memory backlog of the replication stream on the master side. 
The master and all the slaves agree on a *replication -offset* and a *master run id*, so when the link goes down, the slave will +offset* and a *master run ID*, so when the link goes down, the slave will reconnect and ask the master to continue the replication. Assuming the -master run id is still the same, and that the offset specified is available +master run ID is still the same, and that the offset specified is available in the replication backlog, replication will resume from the point where it left off. If either of these conditions are unmet, a full resynchronization is performed -(which is the normal pre-2.8 behavior). As the run id of the connected master is not -persisted to disk, a full resynchronization is needed when the slave restarts. +(which is the normal pre-2.8 behavior). As the run ID of the connected master is not persisted to disk, a full resynchronization is needed when the slave restarts. The new partial resynchronization feature uses the `PSYNC` command internally, -while the old implementation uses the `SYNC` command. Note that a Redis 2.8 +while the old implementation uses the `SYNC` command. Note that a Redis slave is able to detect if the server it is talking with does not support `PSYNC`, and will use `SYNC` instead. @@ -111,12 +106,10 @@ Normally a full resynchronization requires to create an RDB file on disk, then reload the same RDB from disk in order to feed the slaves with the data. With slow disks this can be a very stressing operation for the master. -Redis version 2.8.18 will be the first version to have experimental support -for diskless replication. In this setup the child process directly sends the +Redis version 2.8.18 is the first version to have support for diskless +replication. In this setup the child process directly sends the RDB over the wire to slaves, without using the disk as intermediate storage. -The feature is currently considered experimental. - Configuration --- @@ -218,3 +211,27 @@ able to work: 3. 
During Lua scripts executions no keys expires are performed. As a Lua script runs, conceptually the time in the master is frozen, so that a given key will either exist or not for all the time the script runs. This prevents keys to expire in the middle of a script, and is needed in order to send the same script to the slave in a way that is guaranteed to have the same effects in the data set. As it is expected, once a slave is turned into a master because of a fail over, it start to expire keys in an independent way without requiring help from its old master. + +Configuring replication in Docker and NAT +--- + +When Docker, or other types of containers using port forwarding, or Network Address Translation is used, Redis replication needs some extra care, especially when using Redis Sentinel or other systems where the master `INFO` or `ROLE` commands output are scanned in order to discover slaves addresses. + +The problem is that the `ROLE` command, and the replication section of +the `INFO` output, when issued into a master instance, will show slaves +as having the IP address they use to connect to the master, which, in +environments using NAT may be different compared to the logical address of the +slave instance (the one that clients should use to connect to slaves). + +Similarly the slaves will be listed with the listening port configured +into `redis.conf`, that may be different than the forwarded port in case +the port is remapped. + +In order to fix both issues, it is possible, since Redis 3.2.2, to force +a slave to announce an arbitrary pair of IP and port to the master. +The two configurations directives to use are: + + slave-announce-ip 5.5.5.5 + slave-announce-port 1234 + +And are documented in the example `redis.conf` of recent Redis distributions. From d82506dae58fcce5982edfbece8106ef2bdcce62 Mon Sep 17 00:00:00 2001 From: Dan Sullivan Date: Mon, 8 Aug 2016 15:38:29 -0400 Subject: [PATCH 0641/2314] Update mass-insert.md (#748) Small update to grammar. 
--- topics/mass-insert.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/mass-insert.md b/topics/mass-insert.md index 38d6abcedf..9f0c31f950 100644 --- a/topics/mass-insert.md +++ b/topics/mass-insert.md @@ -1,7 +1,7 @@ Redis Mass Insertion === -Sometimes Redis instances needs to be loaded with big amount of preexisting +Sometimes Redis instances need to be loaded with a big amount of preexisting or user generated data in a short amount of time, so that millions of keys will be created as fast as possible. From 040e7d594e57040872f19fc548026dbdafb1d56b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Usowski?= Date: Fri, 19 Aug 2016 00:21:56 +0200 Subject: [PATCH 0642/2314] Change 'think at' phrases to proper ones --- commands/expire.md | 4 ++-- commands/shutdown.md | 2 +- topics/internals-vm.md | 2 +- topics/replication.md | 4 +++- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/commands/expire.md b/commands/expire.md index 192ffb6860..1249c742d2 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -60,8 +60,8 @@ TTL mykey Imagine you have a web service and you are interested in the latest N pages _recently_ visited by your users, such that each adjacent page view was not performed more than 60 seconds after the previous. -Conceptually you may think at this set of page views as a _Navigation session_ -if your user, that may contain interesting information about what kind of +Conceptually you may consider this set of page views as a _Navigation session_ +of your user, that may contain interesting information about what kind of products he or she is looking for currently, so that you can recommend related products. diff --git a/commands/shutdown.md b/commands/shutdown.md index 925c43fb0d..db0e0a2a8e 100644 --- a/commands/shutdown.md +++ b/commands/shutdown.md @@ -25,7 +25,7 @@ Specifically: configured. * **SHUTDOWN NOSAVE** will prevent a DB saving operation even if one or more save points are configured. 
- (You can think at this variant as an hypothetical **ABORT** command that just + (You can think of this variant as an hypothetical **ABORT** command that just stops the server). @return diff --git a/topics/internals-vm.md b/topics/internals-vm.md index 5e96d095b5..b327195f6a 100644 --- a/topics/internals-vm.md +++ b/topics/internals-vm.md @@ -242,7 +242,7 @@ This is what we do: * If we detect that at least a key in the requested command is swapped on disk, we block the client instead of really issuing the command. For every swapped value associated to a requested key, an I/O job is created, in order to bring the values back in memory. The main thread continues the execution of the event loop, without caring about the blocked client. * In the meanwhile, I/O threads are loading values in memory. Every time an I/O thread finished loading a value, it sends a byte to the main thread using an UNIX pipe. The pipe file descriptor has a readable event associated in the main thread event loop, that is the function `vmThreadedIOCompletedJob`. If this function detects that all the values needed for a blocked client were loaded, the client is restarted and the original command called. -So you can think at this as a blocked VM that almost always happen to have the right keys in memory, since we pause clients that are going to issue commands about swapped out values until this values are loaded. +So you can think of this as a blocked VM that almost always happen to have the right keys in memory, since we pause clients that are going to issue commands about swapped out values until this values are loaded. If the function checking what argument is a key fails in some way, there is no problem: the lookup function will see that a given key is associated to a swapped out value and will block loading it. So our non blocking VM reverts to a blocking one when it is not possible to anticipate what keys are touched. 
diff --git a/topics/replication.md b/topics/replication.md index b0939846ab..dc24106709 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -180,7 +180,9 @@ This is how the feature works: If there are at least N slaves, with a lag less than M seconds, then the write will be accepted. -You may think at it as a relaxed version of the "C" in the CAP theorem, where consistency is not ensured for a given write, but at least the time window for data loss is restricted to a given number of seconds. +You may think of it as a relaxed version of the "C" in the CAP theorem, where +consistency is not ensured for a given write, but at least the time window for +data loss is restricted to a given number of seconds. If the conditions are not met, the master will instead reply with an error and the write will not be accepted. From daad2f8ba86ed19eec68a7039dd406dfb4b64fc7 Mon Sep 17 00:00:00 2001 From: Capacitor Set Date: Sun, 28 Aug 2016 15:04:41 +0200 Subject: [PATCH 0643/2314] Add rebridge to clients.json (#751) * Add rebridge to clients.json * Remove author field for Rebridge --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 62fd691c5e..1d73178af1 100644 --- a/clients.json +++ b/clients.json @@ -1401,6 +1401,14 @@ "description": "A Redis Cluster C++ Client, based on hiredis and support standalone, it's easy to make and use, not depends on C++11 or later.", "authors": ["eyjian"], "active": true + }, + + { + "name": "rebridge", + "language": "Node.js", + "repository": "https://github.com/CapacitorSet/rebridge", + "description": "Rebridge is a transparent Javascript-Redis bridge. It creates JavaScript objects that are automatically synchronized to a Redis database. 
(Requires Node 6)", + "active": true } ] From e9991eea567fb7b3ae2a1e858334c429d968a894 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 19 Sep 2016 17:27:24 +0200 Subject: [PATCH 0644/2314] SHUTDOWN NOSAVE subtle behaviors documented. --- commands/shutdown.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/commands/shutdown.md b/commands/shutdown.md index 925c43fb0d..a82ded5f01 100644 --- a/commands/shutdown.md +++ b/commands/shutdown.md @@ -28,6 +28,20 @@ Specifically: (You can think at this variant as an hypothetical **ABORT** command that just stops the server). +## Conditions where the a SHUTDOWN NOSAVE fails + +When using the **NOSAVE** option, the RDB file is not saved on disk. +However if the Append Only File is enabled, things are more complex. +Normally if there is an AOF child process performing an AOF rewrite, Redis +will simply kill it and exit. However there are two conditions where it is +unsafe to do so, ad the **SHUTDOWN NOSAVE** command will be refused with +an error instead. This happens when: + +* The user just turned on AOF, and the server triggered the first AOF rewrite in order to create the initial AOF file. In this context, stopping will result in losing the dataset at all: once restarted, the server will potentially have AOF enabled without having any AOF file at all. +* A slave with AOF enabled, reconnected with its master, performed a full resynchronization, and restarted the AOF file, triggering the initial AOF creation process. In this case not completing the AOF rewrite is dangerous because the latest dataset received from the master would be lost. The new master can actually be even a different instance (if the **SLAVEOF** command was used in order to reconfigure the slave), so it is important to finish the AOF rewrite and start with the correct data set representing the data set in memory when the server was terminated. 
+ +However there situations when we want just to terminate a Redis instance ASAP, regardless of what its content is. In such a case, the right combination of commands is to send a **CONFIG appendonly no** followed by a **SHUTDOWN NOSAVE**. The first command will turn off the AOF if needed, and will terminate the AOF rewriting child if there is one active. The second command will not have any problem to execute since the AOF is no longer enabled. + @return @simple-string-reply on error. From 69563dede108609a5283eb2bdeae8d1d3caf0bff Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 22 Sep 2016 10:59:20 +0200 Subject: [PATCH 0645/2314] Ignore one more word --- wordlist | 1 + 1 file changed, 1 insertion(+) diff --git a/wordlist b/wordlist index a996223e30..7d3f8451e1 100644 --- a/wordlist +++ b/wordlist @@ -148,6 +148,7 @@ analytics antirez aof appendfsync +appendonly arity atomicity auth From 559957193a6f7011e279739ff0034f9b46f57919 Mon Sep 17 00:00:00 2001 From: Pavel Date: Thu, 22 Sep 2016 11:59:53 +0300 Subject: [PATCH 0646/2314] Clarified when WATCH doesn't abort EXEC (#737) * Clarified when WATCH doesn't abort EXEC #734 WATCH accounts for modifications by its own connection * Update transactions.md --- topics/transactions.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/topics/transactions.md b/topics/transactions.md index dd921a2a22..27b446635f 100644 --- a/topics/transactions.md +++ b/topics/transactions.md @@ -195,8 +195,10 @@ there's no need to repeat the operation. So what is `WATCH` really about? It is a command that will make the `EXEC` conditional: we are asking Redis to perform -the transaction only if no other client modified any of the -`WATCH`ed keys. Otherwise the transaction is not entered at +the transaction only if none of the `WATCH`ed keys were modified. +(But they might be changed by the same client inside the transaction +without aborting it. 
[More on this](https://github.com/antirez/redis-doc/issues/734).) +Otherwise the transaction is not entered at all. (Note that if you `WATCH` a volatile key and Redis expires the key after you `WATCH`ed it, `EXEC` will still work. [More on this](http://code.google.com/p/redis/issues/detail?id=270).) From e37991fdb5e1a0bd7101ebd75e4824064e2a131d Mon Sep 17 00:00:00 2001 From: Abdullah Alger Date: Thu, 22 Sep 2016 17:48:48 -0700 Subject: [PATCH 0647/2314] changed the word "fairest" to "farthest" because "fairest" is the wrong word and does not make sense in this context --- commands/georadius.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/georadius.md b/commands/georadius.md index d26172d1aa..00bbaa4508 100644 --- a/commands/georadius.md +++ b/commands/georadius.md @@ -17,8 +17,8 @@ The command optionally returns additional information using the following option The command default is to return unsorted items. Two different sorting methods can be invoked using the following two options: -* `ASC`: Sort returned items from the nearest to the fairest, relative to the center. -* `DESC`: Sort returned items from the fairest to the nearest, relative to the center. +* `ASC`: Sort returned items from the nearest to the farthest, relative to the center. +* `DESC`: Sort returned items from the farthest to the nearest, relative to the center. By default all the matching items are returned. It is possible to limit the results to the first N matching items by using the **COUNT ``** option. However note that internally the command needs to perform an effort proportional to the number of items matching the specified area, so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. On the other hand `COUNT` can be a very effective way to reduce bandwidth usage if normally just the first results are used. 
From 6b997e057354f9aa439816b084ccadaf0cf8bf19 Mon Sep 17 00:00:00 2001 From: Chris Tanner Date: Wed, 5 Oct 2016 14:50:00 +0100 Subject: [PATCH 0648/2314] fixed grammar in cluster-failover.md --- commands/cluster-failover.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/commands/cluster-failover.md b/commands/cluster-failover.md index f6c97848c8..2c4df76322 100644 --- a/commands/cluster-failover.md +++ b/commands/cluster-failover.md @@ -1,4 +1,4 @@ -This command, that can only be send to a Redis Cluster slave node, forces +This command, that can only be sent to a Redis Cluster slave node, forces the slave to start a manual failover of its master instance. A manual failover is a special kind of failover that is usually executed when @@ -9,12 +9,12 @@ without any window for data loss. It works in the following way: 1. The slave tells the master to stop processing queries from clients. 2. The master replies to the slave with the current *replication offset*. 3. The slave waits for the replication offset to match on its side, to make sure it processed all the data from the master before it continues. -4. The slave starts a failover, obtains a new configuration epoch from the majority of the masters, and broadcast the new configuration. -5. The old master receives the configuration update: unblocks its clients and start replying with redirection messages so that they'll continue the chat with the new master. +4. The slave starts a failover, obtains a new configuration epoch from the majority of the masters, and broadcasts the new configuration. +5. The old master receives the configuration update: unblocks its clients and starts replying with redirection messages so that they'll continue the chat with the new master. This way clients are moved away from the old master to the new master -atomically and only when the slave that is turning in the new master -processed all the replication stream from the old master. 
+atomically and only when the slave that is turning into the new master +has processed all of the replication stream from the old master. ## FORCE option: manual failover when the master is down From a7a52c19d2aeff82bb27ebf8fbf91fbe113c087b Mon Sep 17 00:00:00 2001 From: David Rabkin Date: Fri, 7 Oct 2016 02:13:37 -0700 Subject: [PATCH 0649/2314] Update sentinel.md (#756) Fix one grammatical issue in the Sentinel docs: Change: "Fundamental things to know about Sentinel before deploying" Previously: "Fundamental things to know about Sentinel before to deploy" --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index b276192cd8..ecb779276d 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -75,7 +75,7 @@ connections from the IP addresses of the other Sentinel instances. Otherwise Sentinels can't talk and can't agree about what to do, so failover will never be performed. -Fundamental things to know about Sentinel before to deploy +Fundamental things to know about Sentinel before deploying --- 1. You need at least three Sentinel instances for a robust deployment. From 2c30231cd3c58b522b1c2ef185953b90691197ec Mon Sep 17 00:00:00 2001 From: Luis Ashurei Date: Sun, 9 Oct 2016 17:40:29 +0800 Subject: [PATCH 0650/2314] Fix typo (#758) --- commands/shutdown.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/shutdown.md b/commands/shutdown.md index 4665a1b211..46cc9b200b 100644 --- a/commands/shutdown.md +++ b/commands/shutdown.md @@ -34,7 +34,7 @@ When using the **NOSAVE** option, the RDB file is not saved on disk. However if the Append Only File is enabled, things are more complex. Normally if there is an AOF child process performing an AOF rewrite, Redis will simply kill it and exit. 
However there are two conditions where it is -unsafe to do so, ad the **SHUTDOWN NOSAVE** command will be refused with +unsafe to do so, and the **SHUTDOWN NOSAVE** command will be refused with an error instead. This happens when: * The user just turned on AOF, and the server triggered the first AOF rewrite in order to create the initial AOF file. In this context, stopping will result in losing the dataset at all: once restarted, the server will potentially have AOF enabled without having any AOF file at all. From aa525a296162dd2b29ab12ff0d893afc08e159b6 Mon Sep 17 00:00:00 2001 From: vislee Date: Sun, 9 Oct 2016 17:40:54 +0800 Subject: [PATCH 0651/2314] Update commands.json (#757) fixed parameter for SCRIPT EXISTS command --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index 5eb3a19011..f5e09d7504 100644 --- a/commands.json +++ b/commands.json @@ -2045,7 +2045,7 @@ "complexity": "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation).", "arguments": [ { - "name": "script", + "name": "sha1", "type": "string", "multiple": true } From b831b509674841e45cbae20642310d18bb6dd14b Mon Sep 17 00:00:00 2001 From: Adam Avilla Date: Sun, 9 Oct 2016 02:41:53 -0700 Subject: [PATCH 0652/2314] Updating to latest example.rb with links. (#754) --- topics/cluster-tutorial.md | 81 +++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 36 deletions(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index b129cd17a7..1508690479 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -423,41 +423,50 @@ failing, or start a resharding, to see how Redis Cluster behaves under real world conditions. It is not very helpful to see what happens while nobody is writing to the cluster. -This section explains some basic usage of redis-rb-cluster showing two -examples. 
The first is the following, and is the `example.rb` file inside -the redis-rb-cluster distribution: +This section explains some basic usage of +[redis-rb-cluster](https://github.com/antirez/redis-rb-cluster) showing two +examples. The first is the following, and is the +[`example.rb`](https://github.com/antirez/redis-rb-cluster/blob/master/example.rb) +file inside the redis-rb-cluster distribution: ``` - 1 require './cluster' - 2 - 3 startup_nodes = [ - 4 {:host => "127.0.0.1", :port => 7000}, - 5 {:host => "127.0.0.1", :port => 7001} - 6 ] - 7 rc = RedisCluster.new(startup_nodes,32,:timeout => 0.1) - 8 - 9 last = false - 10 - 11 while not last - 12 begin - 13 last = rc.get("__last__") - 14 last = 0 if !last - 15 rescue => e - 16 puts "error #{e.to_s}" - 17 sleep 1 - 18 end - 19 end - 20 - 21 ((last.to_i+1)..1000000000).each{|x| - 22 begin - 23 rc.set("foo#{x}",x) - 24 puts rc.get("foo#{x}") - 25 rc.set("__last__",x) - 26 rescue => e - 27 puts "error #{e.to_s}" - 28 end - 29 sleep 0.1 - 30 } + 1 require './cluster' + 2 + 3 if ARGV.length != 2 + 4 startup_nodes = [ + 5 {:host => "127.0.0.1", :port => 7000}, + 6 {:host => "127.0.0.1", :port => 7001} + 7 ] + 8 else + 9 startup_nodes = [ + 10 {:host => ARGV[0], :port => ARGV[1].to_i} + 11 ] + 12 end + 13 + 14 rc = RedisCluster.new(startup_nodes,32,:timeout => 0.1) + 15 + 16 last = false + 17 + 18 while not last + 19 begin + 20 last = rc.get("__last__") + 21 last = 0 if !last + 22 rescue => e + 23 puts "error #{e.to_s}" + 24 sleep 1 + 25 end + 26 end + 27 + 28 ((last.to_i+1)..1000000000).each{|x| + 29 begin + 30 rc.set("foo#{x}",x) + 31 puts rc.get("foo#{x}") + 32 rc.set("__last__",x) + 33 rescue => e + 34 puts "error #{e.to_s}" + 35 end + 36 sleep 0.1 + 37 } ``` The application does a very simple thing, it sets keys in the form `foo` to `number`, one after the other. 
So if you run the program the result is the @@ -472,7 +481,7 @@ The program looks more complex than it should usually as it is designed to show errors on the screen instead of exiting with an exception, so every operation performed with the cluster is wrapped by `begin` `rescue` blocks. -The **line 7** is the first interesting line in the program. It creates the +The **line 14** is the first interesting line in the program. It creates the Redis Cluster object, using as argument a list of *startup nodes*, the maximum number of connections this object is allowed to take against different nodes, and finally the timeout after a given operation is considered to be failed. @@ -485,7 +494,7 @@ first node. You should expect such a behavior with any other serious client. Now that we have the Redis Cluster object instance stored in the **rc** variable we are ready to use the object like if it was a normal Redis object instance. -This is exactly what happens in **line 11 to 19**: when we restart the example +This is exactly what happens in **line 18 to 26**: when we restart the example we don't want to start again with `foo0`, so we store the counter inside Redis itself. The code above is designed to read this counter, or if the counter does not exist, to assign it the value of zero. @@ -494,7 +503,7 @@ However note how it is a while loop, as we want to try again and again even if the cluster is down and is returning errors. Normal applications don't need to be so careful. -**Lines between 21 and 30** start the main loop where the keys are set or +**Lines between 28 and 37** start the main loop where the keys are set or an error is displayed. Note the `sleep` call at the end of the loop. In your tests you can remove From 0da5a062389421b115a2d0d7bd262ba74b4573f7 Mon Sep 17 00:00:00 2001 From: Adam Avilla Date: Sun, 9 Oct 2016 02:42:21 -0700 Subject: [PATCH 0653/2314] I think this is what it is suppossed to be. 
(#755) --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 1508690479..b186422c1c 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -661,7 +661,7 @@ This is what happens, for example, if I reset a counter manually while the program is running: ``` -$ redis 127.0.0.1:7000> set key_217 0 +$ redis-cli -h 127.0.0.1 -p 7000 set key_217 0 OK (in the other tab I see...) From 8f80eaf13fec14b3082bf5611342583b27051043 Mon Sep 17 00:00:00 2001 From: Calle Erlandsson Date: Sun, 9 Oct 2016 11:42:52 +0200 Subject: [PATCH 0654/2314] Fix typo in PFCOUNT time complexity description (#746) --- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index f5e09d7504..98ba22d430 100644 --- a/commands.json +++ b/commands.json @@ -1712,7 +1712,7 @@ }, "PFCOUNT": { "summary": "Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).", - "complexity": "O(1) with every small average constant times when called with a single key. O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys.", + "complexity": "O(1) with a very small average constant time when called with a single key. 
O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys.", "arguments": [ { "name": "key", From bf891a392d4507721a317d5b3546b6eca38c825f Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sun, 9 Oct 2016 10:44:49 +0100 Subject: [PATCH 0655/2314] Added command TOUCH (#750) * Added command TOUCH * Added command TOUCH --- commands.json | 13 +++++++++++++ commands/touch.md | 14 ++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 commands/touch.md diff --git a/commands.json b/commands.json index 98ba22d430..ba179c9979 100644 --- a/commands.json +++ b/commands.json @@ -2516,6 +2516,19 @@ "since": "2.6.0", "group": "server" }, + "TOUCH": { + "summary": "Alters the last access time of a key(s). Returns the number of existing keys specified.", + "complexity": "O(N) where N is the number of keys that will be touched.", + "arguments": [ + { + "name": "key", + "type": "key", + "multiple": true + } + ], + "since": "3.2.1", + "group": "generic" + }, "TTL": { "summary": "Get the time to live for a key", "complexity": "O(1)", diff --git a/commands/touch.md b/commands/touch.md new file mode 100644 index 0000000000..a369354503 --- /dev/null +++ b/commands/touch.md @@ -0,0 +1,14 @@ +Alters the last access time of a key(s). +A key is ignored if it does not exist. + +@return + +@integer-reply: The number of keys that were touched. 
+ +@examples + +```cli +SET key1 "Hello" +SET key2 "World" +TOUCH key1 key2 +``` From 80f6ce7216cf92eaf9622d1b123e3f04661fdca0 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sun, 9 Oct 2016 10:45:16 +0100 Subject: [PATCH 0656/2314] Added tool cheprasov/php-redis-lock (#745) * added tool cheprasov/php-redis-lock * added redlock cheprasov/php-redis-lock --- tools.json | 7 +++++++ topics/distlock.md | 1 + 2 files changed, 8 insertions(+) diff --git a/tools.json b/tools.json index d55d543440..88c20e8f6b 100644 --- a/tools.json +++ b/tools.json @@ -576,5 +576,12 @@ "repository": "https://github.com/jwhitbeck/java-rdb-parser", "description": "A simple Redis RDB file parser for Java", "authors": [] + }, + { + "name": "cheprasov/php-redis-lock", + "language": "PHP", + "repository": "https://github.com/cheprasov/php-redis-lock", + "description": "RedisLock for PHP is a synchronization mechanism for enforcing limits on access to a resource in an environment where there are many threads of execution. A lock is designed to enforce a mutual exclusion concurrency control policy.", + "authors": ["cheprasov84"] } ] diff --git a/topics/distlock.md b/topics/distlock.md index b3f05e7613..34e837e9ee 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -27,6 +27,7 @@ already available that can be used for reference. * [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). * [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation) +* [cheprasov/php-redis-lock](https://github.com/cheprasov/php-redis-lock) (PHP library for locks) * [Redsync.go](https://github.com/hjr265/redsync.go) (Go implementation). * [Redisson](https://github.com/mrniko/redisson) (Java implementation). 
* [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). From 40da090bbf0389ceccd2219ce623f7676715b594 Mon Sep 17 00:00:00 2001 From: Jason Veatch Date: Sun, 9 Oct 2016 05:46:17 -0400 Subject: [PATCH 0657/2314] hmset overwrites specified fields, not all fields (#727) --- commands/hmset.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/hmset.md b/commands/hmset.md index a71f07d274..8cec77585e 100644 --- a/commands/hmset.md +++ b/commands/hmset.md @@ -1,6 +1,6 @@ Sets the specified fields to their respective values in the hash stored at `key`. -This command overwrites any existing fields in the hash. +This command overwrites any specified fields already existing in the hash. If `key` does not exist, a new key holding a hash is created. @return From 8df9f766f80923c412d0c0046de75983fb13a443 Mon Sep 17 00:00:00 2001 From: James Edwards Date: Sun, 9 Oct 2016 02:47:11 -0700 Subject: [PATCH 0658/2314] Add Redis Cluster Java Client, Jedipus. (#717) * Add Redis Cluster Java Client, Jedipus. * Fix caps. * Update Jedipus description and twitter user. 
--- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 1d73178af1..5ba22c8ce2 100644 --- a/clients.json +++ b/clients.json @@ -206,6 +206,15 @@ "active": true }, + { + "name": "Jedipus", + "language": "Java", + "repository": "https://github.com/jamespedwards42/jedipus", + "description": "Redis Client & Command Executor.", + "authors": ["jamespedwards"], + "active": true + }, + { "name": "Redisson", "language": "Java", From c18aacd1fde8cf190cf2281dec8d4cb730c96106 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 9 Oct 2016 12:48:22 +0300 Subject: [PATCH 0659/2314] Adds other benchmarking tools (#706) --- topics/benchmarks.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/topics/benchmarks.md b/topics/benchmarks.md index e1f034eb81..465644c63a 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -457,8 +457,16 @@ Another one using a 64-bit box, a Xeon L5420 clocked at 2.5 GHz: LPUSH: 104712.05 requests per second LPOP: 93722.59 requests per second +# Other Redis benchmarking tools -# Example of benchmark results with optimized high-end server hardware +There are several third-party tools that can be used for benchmarking Redis. Refer to each tool's +documentation for more information about its goals and capabilities. + +* [memtier_benchmark](https://github.com/redislabs/memtier_benchmark) from [Redis Labs](https://twitter.com/RedisLabs) is a NoSQL Redis and Memcache traffic generation and benchmarking tool. +* [rpc-perf](https://github.com/twitter/rpc-perf) from [Twitter](https://twitter.com/twitter) is a tool for benchmarking RPC services that supports Redis and Memcache. +* [YCSB](https://github.com/brianfrankcooper/YCSB) from [Yahoo @Yahoo](https://twitter.com/Yahoo) is a benchmarking framework with clients to many databases, including Redis. 
+ +# Example of redis-benchmark results with optimized high-end server hardware * Redis version **2.4.2** * Default number of connections, payload size = 256 From 255066049787697361f10add56d53d2f546e5787 Mon Sep 17 00:00:00 2001 From: asduj Date: Mon, 10 Oct 2016 11:06:04 +0300 Subject: [PATCH 0660/2314] Add tornadis client (python) to the client.json (#752) * Update clients.json Add tornadis client (python) to the client.json * Update clients.json Add url to the tornadis documentation. * Update clients.json Deleted "authors" field; --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 5ba22c8ce2..07b6c51852 100644 --- a/clients.json +++ b/clients.json @@ -482,6 +482,15 @@ "authors": ["aallamaa"] }, + { + "name": "tornadis", + "language": "Python", + "repository": "https://github.com/thefab/tornadis", + "url": "http://tornadis.readthedocs.org", + "description": "Async minimal redis client for tornado ioloop designed for performances (use C hiredis parser)", + "active": true + }, + { "name": "brukva", "language": "Python", From a8f5c57622fc39d9f8bc3b4349924422b1826d10 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 10 Oct 2016 12:00:32 +0200 Subject: [PATCH 0661/2314] 2.8 is not a development version anymore --- topics/signals.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/signals.md b/topics/signals.md index e5764765fa..a73b4eb9bb 100644 --- a/topics/signals.md +++ b/topics/signals.md @@ -44,7 +44,7 @@ The following follow signals are handled as a Redis crash: Once one of these signals is trapped, Redis aborts any current operation and performs the following actions: * A bug report is produced on the log file. This includes a stack trace, dump of registers, and information about the state of clients. -* Since Redis 2.8 (currently a development version) a fast memory test is performed as a first check of the reliability of the crashing system. 
+* Since Redis 2.8 a fast memory test is performed as a first check of the reliability of the crashing system. * If the server was daemonized, the pid file is removed. * Finally the server unregisters its own signal handler for the received signal, and sends the same signal again to itself, in order to make sure that the default action is performed, for instance dumping the core on the file system. From e9cf7306415195c1b933d560d861b2437aa2b4a8 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 10 Oct 2016 16:19:02 +0200 Subject: [PATCH 0662/2314] Sentinel updates (#760) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Sentinel is stable since 2.8, we're further ahead now * Add missing word * in → on * Remove dots in headlines --- topics/sentinel.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index ecb779276d..f2b701c8fa 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -42,11 +42,10 @@ The current version of Sentinel is called **Sentinel 2**. It is a rewrite of the initial Sentinel implementation using stronger and simpler to predict algorithms (that are explained in this documentation). -A stable release of Redis Sentinel is shipped with Redis 2.8 and 3.0, which are -the two latest stable releases of Redis. +A stable release of Redis Sentinel is shipped since Redis 2.8. New developments are performed in the *unstable* branch, and new features -sometimes are back ported into the 2.8 and 3.0 branch as soon as they are +sometimes are back ported into the latest stable branch as soon as they are considered to be stable. Redis Sentinel version 1, shipped with Redis 2.6, is deprecated and should not be used. @@ -208,7 +207,7 @@ Also note that: * Clients are called C1, C2, C3, ..., Cn. 
* When an instance changes role because of Sentinel actions, we put it inside square brackets, so [M1] means an instance that is now a master because of Sentinel intervention. -Note that will never show **setups where just two Sentinels are used**, since +Note that we will never show **setups where just two Sentinels are used**, since Sentinels always need **to talk with the majority** in order to start a failover. @@ -427,7 +426,7 @@ Here we assume that the instances are executed at port 5000, 5001, 5002. We also assume that you have a running Redis master at port 6379 with a slave running at port 6380. We will use the IPv4 loopback address 127.0.0.1 everywhere during the tutorial, assuming you are running the simulation -in your personal computer. +on your personal computer. The three Sentinel configuration files should look like the following: @@ -651,7 +650,7 @@ the following steps should be performed in absence of network partitions: 2. Send a `SENTINEL RESET *` command to all the other Sentinel instances (instead of `*` you can use the exact master name if you want to reset just a single master). One after the other, waiting at least 30 seconds between instances. 3. Check that all the Sentinels agree about the number of Sentinels currently active, by inspecting the output of `SENTINEL MASTER mastername` of every Sentinel. -Removing the old master or unreachable slaves. +Removing the old master or unreachable slaves --- Sentinels never forget about slaves of a given master, even when they are @@ -862,7 +861,7 @@ to a master, as Sentinel will auto discover this list querying Redis. * Hello messages also include the full current configuration of the master. If the receiving Sentinel has a configuration for a given master which is older than the one received, it updates to the new configuration immediately. * Before adding a new sentinel to a master a Sentinel always checks if there is already a sentinel with the same runid or the same address (ip and port pair). 
In that case all the matching sentinels are removed, and the new added. -Sentinel reconfiguration of instances outside the failover procedure. +Sentinel reconfiguration of instances outside the failover procedure --- Even when no failover is in progress, Sentinels will always try to set the From 9d544b8a23360471e7f7ba04adaa383aec1a3f25 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 10 Oct 2016 16:52:32 +0200 Subject: [PATCH 0663/2314] Fix incorrect statement about SHUTDOWN behavior. Thanks to @oranagra for noticing that the description was actually still non conforming with the implementation. --- commands/shutdown.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/commands/shutdown.md b/commands/shutdown.md index 4665a1b211..1971c4d818 100644 --- a/commands/shutdown.md +++ b/commands/shutdown.md @@ -28,19 +28,21 @@ Specifically: (You can think of this variant as an hypothetical **ABORT** command that just stops the server). -## Conditions where the a SHUTDOWN NOSAVE fails +## Conditions where a SHUTDOWN fails + +When the Append Only File is enabled the shutdown may fail because the +system is in a state that does not allow to safely immediately persist +on disk. -When using the **NOSAVE** option, the RDB file is not saved on disk. -However if the Append Only File is enabled, things are more complex. Normally if there is an AOF child process performing an AOF rewrite, Redis will simply kill it and exit. However there are two conditions where it is -unsafe to do so, ad the **SHUTDOWN NOSAVE** command will be refused with +unsafe to do so, ad the **SHUTDOWN** command will be refused with an error instead. This happens when: * The user just turned on AOF, and the server triggered the first AOF rewrite in order to create the initial AOF file. In this context, stopping will result in losing the dataset at all: once restarted, the server will potentially have AOF enabled without having any AOF file at all. 
* A slave with AOF enabled, reconnected with its master, performed a full resynchronization, and restarted the AOF file, triggering the initial AOF creation process. In this case not completing the AOF rewrite is dangerous because the latest dataset received from the master would be lost. The new master can actually be even a different instance (if the **SLAVEOF** command was used in order to reconfigure the slave), so it is important to finish the AOF rewrite and start with the correct data set representing the data set in memory when the server was terminated. -However there situations when we want just to terminate a Redis instance ASAP, regardless of what its content is. In such a case, the right combination of commands is to send a **CONFIG appendonly no** followed by a **SHUTDOWN NOSAVE**. The first command will turn off the AOF if needed, and will terminate the AOF rewriting child if there is one active. The second command will not have any problem to execute since the AOF is no longer enabled. +There are conditions when we want just to terminate a Redis instance ASAP, regardless of what its content is. In such a case, the right combination of commands is to send a **CONFIG appendonly no** followed by a **SHUTDOWN NOSAVE**. The first command will turn off the AOF if needed, and will terminate the AOF rewriting child if there is one active. The second command will not have any problem to execute since the AOF is no longer enabled. @return From b262a108614e6d526ead0b5cf9a00f70d1bc237a Mon Sep 17 00:00:00 2001 From: Cihan B Date: Wed, 12 Oct 2016 15:08:34 -0700 Subject: [PATCH 0664/2314] incorrect reference to 3.2 as not yet stable. --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index cb04ec10b1..310691acdd 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -466,7 +466,7 @@ output. 
## Replicating commands instead of scripts -Starting with Redis 3.2 (not yet stable) it is possible to select an +Starting with Redis 3.2, it is possible to select an alternative replication method. Instead of replication whole scripts, we can just replicate single write commands generated by the script. We call this **script effects replication**. From ab172df2f66176bc00bc3996c2b95bd830d3d156 Mon Sep 17 00:00:00 2001 From: Cihan B Date: Wed, 12 Oct 2016 15:09:18 -0700 Subject: [PATCH 0665/2314] incorrect reference to 3.2 being currently in beta --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 310691acdd..172357a4da 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -797,7 +797,7 @@ The client library implementation should take one of the following approaches: ## Debugging Lua scripts -Starting with Redis 3.2 (currently in beta), Redis has support for native +Starting with Redis 3.2, Redis has support for native Lua debugging. The Redis Lua debugger is a remote debugger consisting of a server, which is Redis itself, and a client, which is by default `redis-cli`. 
From 1cfca35a896b09ffcb722687b251b9fad0058eed Mon Sep 17 00:00:00 2001 From: Simon Ninon Date: Thu, 13 Oct 2016 17:49:00 +0200 Subject: [PATCH 0666/2314] update cpp_redis description (#763) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 07b6c51852..2e225f5a0d 100644 --- a/clients.json +++ b/clients.json @@ -1318,7 +1318,7 @@ "name": "cpp_redis", "language": "C++", "repository": "https://github.com/cylix/cpp_redis", - "description": "C++11 Redis client: async, thread-safe, no dependencies, pipelining.", + "description": "C++11 Lightweight Redis client: async, thread-safe, no dependency, pipelining, multi-platform.", "authors": ["simon_ninon"], "active": true }, From 1677b5abf0d39c686d20b9bf75f7b7726c0b08fe Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 13 Oct 2016 20:04:11 +0200 Subject: [PATCH 0667/2314] Remove twitter account link --- clients.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients.json b/clients.json index 2e225f5a0d..6f16317f05 100644 --- a/clients.json +++ b/clients.json @@ -1012,8 +1012,7 @@ "language": "C#", "url": "http://andrew-bn.github.io/RedisBoost/", "repository": "https://github.com/andrew-bn/RedisBoost", - "description": "Thread-safe async Redis client. Offers high performance and simple api", - "authors": ["bn_andrew"] + "description": "Thread-safe async Redis client. 
Offers high performance and simple api" }, { From 7779f399101e8943d80183ca793d100c24a7580f Mon Sep 17 00:00:00 2001 From: deep Date: Fri, 14 Oct 2016 16:59:23 +0800 Subject: [PATCH 0668/2314] add hiredis-vip client for redis cluster (#762) * add hiredis-vip client for redis cluster * Update clients.json * update authors --- clients.json | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 6f16317f05..bae631a797 100644 --- a/clients.json +++ b/clients.json @@ -1426,6 +1426,15 @@ "repository": "https://github.com/CapacitorSet/rebridge", "description": "Rebridge is a transparent Javascript-Redis bridge. It creates JavaScript objects that are automatically synchronized to a Redis database. (Requires Node 6)", "active": true - } + }, + { + "name": "hiredis-vip", + "language": "C", + "repository": "https://github.com/vipshop/hiredis-vip", + "description": "This is the C client for redis cluster. Support for synchronous api, MSET/MGET/DEL, pipelining, asynchronous api.", + "authors": ["diguo58"], + "recommended": true, + "active": true + } ] From 977beacc4e4857fed805a1ed8a41e785fab94631 Mon Sep 17 00:00:00 2001 From: deep Date: Fri, 14 Oct 2016 17:00:56 +0800 Subject: [PATCH 0669/2314] add 'redis-migrate-tool' for redis tools (#764) --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 88c20e8f6b..1808d07be0 100644 --- a/tools.json +++ b/tools.json @@ -583,5 +583,12 @@ "repository": "https://github.com/cheprasov/php-redis-lock", "description": "RedisLock for PHP is a synchronization mechanism for enforcing limits on access to a resource in an environment where there are many threads of execution. 
A lock is designed to enforce a mutual exclusion concurrency control policy.", "authors": ["cheprasov84"] + }, + { + "name": "redis-migrate-tool", + "language": "C", + "repository": "https://github.com/vipshop/redis-migrate-tool", + "description": "A convenient and useful tool for migrating data between redis groups.", + "authors": ["diguo58"] } ] From 50fb187c0e740eb3e5dde61e551431af765ae62b Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Tue, 25 Oct 2016 15:57:00 +0300 Subject: [PATCH 0670/2314] Fixes a typo --- topics/clients.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/clients.md b/topics/clients.md index a6bcfa5b81..00fbdfb823 100644 --- a/topics/clients.md +++ b/topics/clients.md @@ -124,7 +124,7 @@ if the client is idle for more than the specified number of seconds, the client You can configure this limit via `redis.conf` or simply using `CONFIG SET timeout `. -Note that the timeout only applies to number clients and it **does not apply to Pub/Sub clients**, since a Pub/Sub connection is a *push style* connection so a client that is idle is the norm. +Note that the timeout only applies to normal clients and it **does not apply to Pub/Sub clients**, since a Pub/Sub connection is a *push style* connection so a client that is idle is the norm. 
Even if by default connections are not subject to timeout, there are two conditions when it makes sense to set a timeout: From cf675d329d7b72ceaad9c5843794becf4f3f9067 Mon Sep 17 00:00:00 2001 From: zhkzyth Date: Fri, 28 Oct 2016 22:29:22 +0800 Subject: [PATCH 0671/2314] fix typo in commands/srandmember.md --- commands/srandmember.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/srandmember.md b/commands/srandmember.md index df2d960015..1ef408fd21 100644 --- a/commands/srandmember.md +++ b/commands/srandmember.md @@ -34,6 +34,6 @@ When instead the count is negative, the behavior changes and the extraction happ The distribution of the returned elements is far from perfect when the number of elements in the set is small, this is due to the fact that we used an approximated random element function that does not really guarantees good distribution. -The algorithm used, that is implemented inside dict.c, samples the hash table buckets to find a non-empty one. Once a non empty bucket is found, since we use chaining in our hash table implementation, the number of elements inside the bucked is checked and a random element is selected. +The algorithm used, that is implemented inside dict.c, samples the hash table buckets to find a non-empty one. Once a non empty bucket is found, since we use chaining in our hash table implementation, the number of elements inside the bucket is checked and a random element is selected. This means that if you have two non-empty buckets in the entire hash table, and one has three elements while one has just one, the element that is alone in its bucket will be returned with much higher probability. 
From 9c1760247c44ec412fb69adaab2da7574d047ffb Mon Sep 17 00:00:00 2001 From: Luis Ashurei Date: Thu, 3 Nov 2016 19:45:58 +0800 Subject: [PATCH 0672/2314] Fix Redis 3.2 currently not stable (#769) Ref commits: https://github.com/antirez/redis-doc/commit/ab172df2f66176bc00bc3996c2b95bd830d3d156 https://github.com/antirez/redis-doc/commit/b262a108614e6d526ead0b5cf9a00f70d1bc237a --- commands/eval.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/eval.md b/commands/eval.md index 172357a4da..2d25d55361 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -339,7 +339,7 @@ dispatching a command received via network is a lot more work for Redis compared to dispatching a command invoked by Lua scripts). Normally replicating scripts instead of the effects of the scripts makes sense, -however not in all the cases. So starting with Redis 3.2 (currently not stable), +however not in all the cases. So starting with Redis 3.2, the scripting engine is able to, alternatively, replicate the sequence of write commands resulting from the script execution, instead of replication the script itself. See the next section for more information. From 2f8e69ab2c2913c59b10e10a351a895e1f7f03db Mon Sep 17 00:00:00 2001 From: Igor Wiedler Date: Sun, 13 Nov 2016 22:22:14 +0100 Subject: [PATCH 0673/2314] Replace gender-specific "he" with "they" when referring to users --- topics/debugging.md | 4 ++-- topics/transactions.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/debugging.md b/topics/debugging.md index dbfacd0f66..7419a5603b 100644 --- a/topics/debugging.md +++ b/topics/debugging.md @@ -28,9 +28,9 @@ GDB can be used in two ways: + It can inspect the state of a program that already terminated using what is called a *core file*, that is, the image of the memory at the time the program was running. 
From the point of view of investigating Redis bugs we need to use both this -GDB modes: the user able to reproduce the bug attaches GDB to his running Redis instance, and when the crash happens, he creates the `core` file that the in turn the developer will use to inspect the Redis internals at the time of the crash. +GDB modes: the user able to reproduce the bug attaches GDB to their running Redis instance, and when the crash happens, he creates the `core` file that the in turn the developer will use to inspect the Redis internals at the time of the crash. -This way the developer can perform all the inspections in his computer without the help of the user, and the user is free to restart Redis in the production environment. +This way the developer can perform all the inspections in their computer without the help of the user, and the user is free to restart Redis in the production environment. Compiling Redis without optimizations ------------------------------------- diff --git a/topics/transactions.md b/topics/transactions.md index 27b446635f..518131a1d1 100644 --- a/topics/transactions.md +++ b/topics/transactions.md @@ -125,7 +125,7 @@ However there are good opinions for this behavior: * Redis commands can fail only if called with a wrong syntax (and the problem is not detectable during the command queueing), or against keys holding the wrong data type: this means that in practical terms a failing command is the result of a programming errors, and a kind of error that is very likely to be detected during development, and not in production. * Redis is internally simplified and faster because it does not need the ability to roll back. -An argument against Redis point of view is that bugs happen, however it should be noted that in general the roll back does not save you from programming errors. For instance if a query increments a key by 2 instead of 1, or increments the wrong key, there is no way for a rollback mechanism to help. 
Given that no one can save the programmer from his errors, and that the kind of errors required for a Redis command to fail are unlikely to enter in production, we selected the simpler and faster approach of not supporting roll backs on errors. +An argument against Redis point of view is that bugs happen, however it should be noted that in general the roll back does not save you from programming errors. For instance if a query increments a key by 2 instead of 1, or increments the wrong key, there is no way for a rollback mechanism to help. Given that no one can save the programmer from their errors, and that the kind of errors required for a Redis command to fail are unlikely to enter in production, we selected the simpler and faster approach of not supporting roll backs on errors. ## Discarding the command queue @@ -195,9 +195,9 @@ there's no need to repeat the operation. So what is `WATCH` really about? It is a command that will make the `EXEC` conditional: we are asking Redis to perform -the transaction only if none of the `WATCH`ed keys were modified. +the transaction only if none of the `WATCH`ed keys were modified. (But they might be changed by the same client inside the transaction -without aborting it. [More on this](https://github.com/antirez/redis-doc/issues/734).) +without aborting it. [More on this](https://github.com/antirez/redis-doc/issues/734).) Otherwise the transaction is not entered at all. (Note that if you `WATCH` a volatile key and Redis expires the key after you `WATCH`ed it, `EXEC` will still work. 
[More on From b8d03252c5d0dc59f1d20a58f4bd453552dc85de Mon Sep 17 00:00:00 2001 From: Igor Wiedler Date: Sun, 13 Nov 2016 23:23:35 +0100 Subject: [PATCH 0674/2314] Update "they" to binary "he or she" as requested --- topics/debugging.md | 4 ++-- topics/transactions.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/debugging.md b/topics/debugging.md index 7419a5603b..df4e866e32 100644 --- a/topics/debugging.md +++ b/topics/debugging.md @@ -28,9 +28,9 @@ GDB can be used in two ways: + It can inspect the state of a program that already terminated using what is called a *core file*, that is, the image of the memory at the time the program was running. From the point of view of investigating Redis bugs we need to use both this -GDB modes: the user able to reproduce the bug attaches GDB to their running Redis instance, and when the crash happens, he creates the `core` file that the in turn the developer will use to inspect the Redis internals at the time of the crash. +GDB modes: the user able to reproduce the bug attaches GDB to his or her running Redis instance, and when the crash happens, he creates the `core` file that the in turn the developer will use to inspect the Redis internals at the time of the crash. -This way the developer can perform all the inspections in their computer without the help of the user, and the user is free to restart Redis in the production environment. +This way the developer can perform all the inspections in his or her computer without the help of the user, and the user is free to restart Redis in the production environment. 
Compiling Redis without optimizations ------------------------------------- diff --git a/topics/transactions.md b/topics/transactions.md index 518131a1d1..ac3786071d 100644 --- a/topics/transactions.md +++ b/topics/transactions.md @@ -125,7 +125,7 @@ However there are good opinions for this behavior: * Redis commands can fail only if called with a wrong syntax (and the problem is not detectable during the command queueing), or against keys holding the wrong data type: this means that in practical terms a failing command is the result of a programming errors, and a kind of error that is very likely to be detected during development, and not in production. * Redis is internally simplified and faster because it does not need the ability to roll back. -An argument against Redis point of view is that bugs happen, however it should be noted that in general the roll back does not save you from programming errors. For instance if a query increments a key by 2 instead of 1, or increments the wrong key, there is no way for a rollback mechanism to help. Given that no one can save the programmer from their errors, and that the kind of errors required for a Redis command to fail are unlikely to enter in production, we selected the simpler and faster approach of not supporting roll backs on errors. +An argument against Redis point of view is that bugs happen, however it should be noted that in general the roll back does not save you from programming errors. For instance if a query increments a key by 2 instead of 1, or increments the wrong key, there is no way for a rollback mechanism to help. Given that no one can save the programmer from his or her errors, and that the kind of errors required for a Redis command to fail are unlikely to enter in production, we selected the simpler and faster approach of not supporting roll backs on errors. 
## Discarding the command queue From 9bfe14f2c695a938a8d18139116870829e444b00 Mon Sep 17 00:00:00 2001 From: Igor Date: Mon, 14 Nov 2016 09:30:07 +0100 Subject: [PATCH 0675/2314] Redis 3.2 shipped with count argument for SPOP (#773) --- commands/spop.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/spop.md b/commands/spop.md index 10d773049c..062cbc18f1 100644 --- a/commands/spop.md +++ b/commands/spop.md @@ -2,7 +2,7 @@ Removes and returns one or more random elements from the set value store at `key This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. -The `count` argument will be available in a later version and is not available in 2.6, 2.8, 3.0 +The `count` argument is available since version 3.2. @return @@ -32,4 +32,4 @@ Note that this command is not suitable when you need a guaranteed uniform distri ## Count argument extension -Redis 3.2 will be the first version where an optional `count` argument can be passed to `SPOP` in order to retrieve multiple elements in a single call. The implementation is already available in the `unstable` branch. +Redis 3.2 introduced an optional `count` argument that can be passed to `SPOP` in order to retrieve multiple elements in a single call. 
From dcf05c0d3eee3050fc8e286841d7c4cdbba08395 Mon Sep 17 00:00:00 2001 From: Marc Verney Date: Thu, 17 Nov 2016 13:30:43 +0100 Subject: [PATCH 0676/2314] Mention EXPIRE behavior with non-positive timeouts (#774) * Mention EXPIRE behavior with non-positive timeouts * Add note about EXPIREAT and past timestamps --- commands/expire.md | 8 ++++++++ commands/expireat.md | 3 ++- topics/notifications.md | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/commands/expire.md b/commands/expire.md index 1249c742d2..6288b838a4 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -23,6 +23,14 @@ that is overwritten by a call like `RENAME Key_B Key_A`, it does not matter if the original `Key_A` had a timeout associated or not, the new key `Key_A` will inherit all the characteristics of `Key_B`. +Note that calling `EXPIRE`/`PEXPIRE` with a non-positive timeout or +`EXPIREAT`/`PEXPIREAT` with a time in the past will result in the key being +[deleted][del] rather than expired (accordingly, the emitted [key event][ntf] +will be `del`, not `expired`). + +[del]: /commands/del +[ntf]: /topics/notifications + ## Refreshing expires It is possible to call `EXPIRE` using as argument a key that already has an diff --git a/commands/expireat.md b/commands/expireat.md index 7deecb5e9d..87981300f6 100644 --- a/commands/expireat.md +++ b/commands/expireat.md @@ -1,6 +1,7 @@ `EXPIREAT` has the same effect and semantic as `EXPIRE`, but instead of specifying the number of seconds representing the TTL (time to live), it takes -an absolute [Unix timestamp][hewowu] (seconds since January 1, 1970). +an absolute [Unix timestamp][hewowu] (seconds since January 1, 1970). A +timestamp in the past will delete the key immediately. 
[hewowu]: http://en.wikipedia.org/wiki/Unix_time diff --git a/topics/notifications.md b/topics/notifications.md index 0884d720fe..6f0f72ee89 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -96,7 +96,7 @@ Different commands generate different kind of events according to the following * `DEL` generates a `del` event for every deleted key. * `RENAME` generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key. -* `EXPIRE` generates an `expire` event when an expire is set to the key, or a `expired` event every time setting an expire results into the key being deleted (see `EXPIRE` documentation for more info). +* `EXPIRE` generates an `expire` event when an expire is set to the key, or an `expired` event every time a positive timeout set on a key results into the key being deleted (see `EXPIRE` documentation for more info). * `SORT` generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition. * `SET` and all its variants (`SETEX`, `SETNX`,`GETSET`) generate `set` events. However `SETEX` will also generate an `expire` events. * `MSET` generates a separated `set` event for every key. From 63ef74079f30a1d77b0f8bb735e2cde7600a7c33 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 22 Nov 2016 13:11:25 +0100 Subject: [PATCH 0677/2314] SADD complexity better specified. 
--- commands.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.json b/commands.json index ba179c9979..6f8f3f9127 100644 --- a/commands.json +++ b/commands.json @@ -1995,7 +1995,7 @@ }, "SADD": { "summary": "Add one or more members to a set", - "complexity": "O(N) where N is the number of members to be added.", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", "arguments": [ { "name": "key", From a6bcdf693d65ab944c315cbafe85f5eb33ec7804 Mon Sep 17 00:00:00 2001 From: Andrew Langhorn Date: Wed, 23 Nov 2016 21:03:14 +0000 Subject: [PATCH 0678/2314] Fix grammar This sounds much better with an 'a' between "used as" and "database". Fixes the grammar, which I noticed whilst reading redis.io. --- topics/introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/introduction.md b/topics/introduction.md index 975506cf1b..bb17783022 100644 --- a/topics/introduction.md +++ b/topics/introduction.md @@ -1,7 +1,7 @@ Introduction to Redis === -Redis is an open source (BSD licensed), in-memory **data structure store**, used as database, cache and message broker. It supports data structures such as +Redis is an open source (BSD licensed), in-memory **data structure store**, used as a database, cache and message broker. It supports data structures such as [strings](/topics/data-types-intro#strings), [hashes](/topics/data-types-intro#hashes), [lists](/topics/data-types-intro#lists), [sets](/topics/data-types-intro#sets), [sorted sets](/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](/topics/data-types-intro#bitmaps), [hyperloglogs](/topics/data-types-intro#hyperloglogs) and [geospatial indexes](/commands/geoadd) with radius queries. 
Redis has built-in [replication](/topics/replication), [Lua scripting](/commands/eval), [LRU eviction](/topics/lru-cache), [transactions](/topics/transactions) and different levels of [on-disk persistence](/topics/persistence), and provides high availability via [Redis Sentinel](/topics/sentinel) and automatic partitioning with [Redis Cluster](/topics/cluster-tutorial). You can run **atomic operations** From 5c97392c4724ca3cb66762486081b313c06f57dd Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 25 Nov 2016 22:50:47 -0300 Subject: [PATCH 0679/2314] Try deploy from Travis. --- .travis.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.travis.yml b/.travis.yml index a905458e99..7f42d39dcd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,3 +13,9 @@ install: - gem install $(sed -e 's/ -v /:/' .gems) script: make -s + +deploy: + provider: script + script: curl https://redis.io/deploy?token=$DEPLOY_TOKEN + on: + branch: master From aa59fc2e094b059f1dfdb07e152f6ca2db8b12ff Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 25 Nov 2016 22:54:23 -0300 Subject: [PATCH 0680/2314] Need to escape token. 
--- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7f42d39dcd..b5acaf3de0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,6 @@ script: make -s deploy: provider: script - script: curl https://redis.io/deploy?token=$DEPLOY_TOKEN + script: curl -G https://redis.io/deploy --data-urlencode token=$DEPLOY_TOKEN on: branch: master From 06f0729ad4422d28021a4686b7c44f9e35c8b92d Mon Sep 17 00:00:00 2001 From: Vladimir Rutsky Date: Wed, 30 Nov 2016 19:23:49 +0400 Subject: [PATCH 0681/2314] fix list formatting (#776) --- commands/rpoplpush.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/commands/rpoplpush.md b/commands/rpoplpush.md index 76bbb580e2..49aa8fa798 100644 --- a/commands/rpoplpush.md +++ b/commands/rpoplpush.md @@ -58,9 +58,11 @@ all the elements of an N-elements list, one after the other, in O(N) without transferring the full list from the server to the client using a single `LRANGE` operation. -The above pattern works even if the following two conditions: * There are -multiple clients rotating the list: they'll fetch different elements, until all -the elements of the list are visited, and the process restarts. +The above pattern works even if the following two conditions: + +* There are multiple clients rotating the list: they'll fetch different + elements, until all the elements of the list are visited, and the process + restarts. * Even if other clients are actively pushing new items at the end of the list. The above makes it very simple to implement a system where a set of items must From d58853cfee7e05d5ffe8b28ac768ca55d2bcf27d Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 2 Dec 2016 10:28:42 +0100 Subject: [PATCH 0682/2314] Improve writable slaves section. 
--- topics/replication.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/topics/replication.md b/topics/replication.md index dc24106709..d0a5ffca8c 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -144,8 +144,18 @@ You may wonder why it is possible to revert the read-only setting and have slave instances that can be target of write operations. While those writes will be discarded if the slave and the master resynchronize or if the slave is restarted, there are a few legitimate -use case for storing ephemeral data in writable slaves. However in the future -it is possible that this feature will be dropped. +use case for storing ephemeral data in writable slaves. + +For example computing slow set or zset operations and storing them into local +keys is an use case for writable slaves that was observed multiple times. + +However note that **writable slaves are uncapable of expiring keys with a time to live set**. This means that if you use `EXPIRE` or other commands that set a maximum TTL for a key, the key will leak, and while you may no longer see it while accessing it with read commands, you will see it in the count of keys and it will still use memory. So in general mixing writable slaves and keys with TTL is going to create issues. + +Also note that since Redis 4.0 slave writes are only local, and are not propoagated to sub-slaves attached to the instance. Sub slaves instead will always receive the replication stream identical to the one sent by the top-level master to the intermediate slaves. So for example in the following setup: + + A ---> B ---> C + +Even if `B` is writable, C will not see `B` writes and will instead have identical dataset as the master instance `A`. Setting a slave to authenticate to a master --- From abf74b1f2a1fb774fb30f0a8b064cf106c6923be Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 2 Dec 2016 17:11:12 +0100 Subject: [PATCH 0683/2314] UNLINK documented. 
--- commands.json | 13 +++++++++++++ commands/unlink.md | 18 ++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 commands/unlink.md diff --git a/commands.json b/commands.json index 6f8f3f9127..a188cd177c 100644 --- a/commands.json +++ b/commands.json @@ -2567,6 +2567,19 @@ "since": "2.0.0", "group": "pubsub" }, + "UNLINK": { + "summary": "Delete a key asynchronously in another thread. Otherwise it is just as DEL, but non blocking.", + "complexity": "O(1) for each key removed regardless of its size. Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.", + "arguments": [ + { + "name": "key", + "type": "key", + "multiple": true + } + ], + "since": "4.0.0", + "group": "generic" + }, "UNWATCH": { "summary": "Forget about all watched keys", "complexity": "O(1)", diff --git a/commands/unlink.md b/commands/unlink.md new file mode 100644 index 0000000000..4c2a61a7b7 --- /dev/null +++ b/commands/unlink.md @@ -0,0 +1,18 @@ +This command is very similar to `DEL`: it removes the specified keys. +Just like `DEL` a key is ignored if it does not exist. However the command +performs the actual memory reclaiming in a different thread, so it is not +blocking, while `DEL` is. This is where the command name comes from: the +command just **unlinks** the keys from the keyspace. The actual removal +will happen later asyncrhonously. + +@return + +@integer-reply: The number of keys that were unlinked. + +@examples + +```cli +SET key1 "Hello" +SET key2 "World" +UNLINK key1 key2 key3 +``` From e6bee9a2684c9d3b0c6fc4ea1590b1d915f3fb0c Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 2 Dec 2016 14:20:25 -0300 Subject: [PATCH 0684/2314] Fix typos. 
--- commands/unlink.md | 2 +- wordlist | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/commands/unlink.md b/commands/unlink.md index 4c2a61a7b7..c91dd664de 100644 --- a/commands/unlink.md +++ b/commands/unlink.md @@ -3,7 +3,7 @@ Just like `DEL` a key is ignored if it does not exist. However the command performs the actual memory reclaiming in a different thread, so it is not blocking, while `DEL` is. This is where the command name comes from: the command just **unlinks** the keys from the keyspace. The actual removal -will happen later asyncrhonously. +will happen later asynchronously. @return diff --git a/wordlist b/wordlist index 7d3f8451e1..d5eb0604c0 100644 --- a/wordlist +++ b/wordlist @@ -367,6 +367,7 @@ underflows unencrypted unguessable unix +unlinks unordered unreachability unsubscribe From c1fec1428994780de17092fe23100284059b8423 Mon Sep 17 00:00:00 2001 From: Danny Guo Date: Thu, 8 Dec 2016 04:21:49 -0500 Subject: [PATCH 0685/2314] Fix typos for config rewrite (#778) --- commands/config-rewrite.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/config-rewrite.md b/commands/config-rewrite.md index 5eb0952027..c1031561bf 100644 --- a/commands/config-rewrite.md +++ b/commands/config-rewrite.md @@ -1,4 +1,4 @@ -The `CONFIG REWRITE` command rewrites the `redis.conf` file the server was started with, applying the minimal changes needed to make it reflecting the configuration currently used by the server, that may be different compared to the original one because of the use of the `CONFIG SET` command. +The `CONFIG REWRITE` command rewrites the `redis.conf` file the server was started with, applying the minimal changes needed to make it reflect the configuration currently used by the server, which may be different compared to the original one because of the use of the `CONFIG SET` command. 
The rewrite is performed in a very conservative way: From d1c303e827b43f50822b424bc47d10d762ef6a7c Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 13 Dec 2016 18:37:00 +0100 Subject: [PATCH 0686/2314] Redis 4.0 writable slaves + expire updates. --- topics/replication.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/topics/replication.md b/topics/replication.md index d0a5ffca8c..5ee2cd540f 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -149,7 +149,12 @@ use case for storing ephemeral data in writable slaves. For example computing slow set or zset operations and storing them into local keys is an use case for writable slaves that was observed multiple times. -However note that **writable slaves are uncapable of expiring keys with a time to live set**. This means that if you use `EXPIRE` or other commands that set a maximum TTL for a key, the key will leak, and while you may no longer see it while accessing it with read commands, you will see it in the count of keys and it will still use memory. So in general mixing writable slaves and keys with TTL is going to create issues. +However note that **writable slaves before version 4.0 were uncapable of expiring keys with a time to live set**. This means that if you use `EXPIRE` or other commands that set a maximum TTL for a key, the key will leak, and while you may no longer see it while accessing it with read commands, you will see it in the count of keys and it will still use memory. So in general mixing writable slaves (previous version 4.0) and keys with TTL is going to create issues. + +Redis 4.0 RC3 and greater totally resolve this problem and now writable +slaves are able to evict keys with TTL as masters do, with the exceptions +of keys written in DB numbers greater than 63 (but by default Redis instances +only have 16 databases). Also note that since Redis 4.0 slave writes are only local, and are not propoagated to sub-slaves attached to the instance. 
Sub slaves instead will always receive the replication stream identical to the one sent by the top-level master to the intermediate slaves. So for example in the following setup: From d0b8d797a274aa99bfbd6b1bdeb9732fc4d3c273 Mon Sep 17 00:00:00 2001 From: Wyatt Alt Date: Mon, 19 Dec 2016 14:14:36 -0800 Subject: [PATCH 0687/2314] reword slavery note for grammar --- commands/slaveof.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/slaveof.md b/commands/slaveof.md index 26def63217..2dc61a8147 100644 --- a/commands/slaveof.md +++ b/commands/slaveof.md @@ -19,6 +19,6 @@ slave. @simple-string-reply -**A note about slavery**: it's unfortunate that originally the master-slave terminology was picked for databases. When Redis was designed the existing terminology was used without much analysis of alternatives, however a **SLAVEOF NO ONE** command was added as a freedom message. Instead of changing the terminology, that would require breaking backward compatible in the API and `INFO` output, we want to use this page to remember you about slavery, **a crime against humanity now** but something that was perpetuated [throughout the whole human history](https://en.wikipedia.org/wiki/Slavery). +**A note about slavery**: it's unfortunate that originally the master-slave terminology was picked for databases. When Redis was designed the existing terminology was used without much analysis of alternatives, however a **SLAVEOF NO ONE** command was added as a freedom message. Instead of changing the terminology, which would require breaking backward compatibility in the API and `INFO` output, we want to use this page to remind you that slavery is both **a crime against humanity today** and something that has been perpetuated [throughout all human history](https://en.wikipedia.org/wiki/Slavery). 
*If slavery is not wrong, nothing is wrong.* -- Abraham Lincoln From be0ad99141596fb9c539971a16bc9da749a74b75 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Sun, 25 Dec 2016 21:50:35 +0000 Subject: [PATCH 0688/2314] Added ASYNC param for FLUSHALL & FLUSHDB --- commands.json | 16 ++++++++++++++++ commands/flushall.md | 4 ++++ commands/flushdb.md | 4 ++++ 3 files changed, 24 insertions(+) diff --git a/commands.json b/commands.json index a188cd177c..d9d2babada 100644 --- a/commands.json +++ b/commands.json @@ -754,11 +754,27 @@ }, "FLUSHALL": { "summary": "Remove all keys from all databases", + "arguments": [ + { + "name": "async", + "type": "enum", + "enum": ["ASYNC"], + "optional": true + } + ], "since": "1.0.0", "group": "server" }, "FLUSHDB": { "summary": "Remove all keys from the current database", + "arguments": [ + { + "name": "async", + "type": "enum", + "enum": ["ASYNC"], + "optional": true + } + ], "since": "1.0.0", "group": "server" }, diff --git a/commands/flushall.md b/commands/flushall.md index be1f717eba..5d2dffaa12 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -5,6 +5,10 @@ This command never fails. The time-complexity for this operation is O(N), N being the number of keys in all existing databases. +FLUSHALL ASYNC (Redis 4.0.0 or greater) +--- +Code name “lazy freeing of objects”, but it’s a lame name for a neat feature. There is a new command called `UNLINK` that just deletes a key reference in the database, and does the actual clean up of the allocations in a separated thread, so if you use `UNLINK` instead of `DEL` against a huge key the server will not block. And even better with the ASYNC options of `FLUSHALL` and `FLUSHDB` you can do that for whole DBs or for all the data inside the instance, if you want. Combined with the new `SWAPDB` command, that swaps two Redis databases content, `FLUSHDB ASYNC` can be quite interesting. 
Once you, for instance, populated DB 1 with the new version of the data, you can `SWAPDB 0 1` and `FLUSHDB ASYNC` the database with the old data, and create yet a newer version and reiterate. This is only possible now because flushing a whole DB is no longer blocking. + @return @simple-string-reply diff --git a/commands/flushdb.md b/commands/flushdb.md index f41d0e31f3..af596f4d8f 100644 --- a/commands/flushdb.md +++ b/commands/flushdb.md @@ -4,6 +4,10 @@ This command never fails. The time-complexity for this operation is O(N), N being the number of keys in the database. +FLUSHDB ASYNC (Redis 4.0.0 or greater) +--- +See `FLUSHALL` for documentation. + @return @simple-string-reply From 992bc8c4a48fb66698ee4a41f3392edd3f23710a Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Mon, 26 Dec 2016 00:39:25 +0000 Subject: [PATCH 0689/2314] Added ASYNC param for FLUSHALL & FLUSHDB --- commands/flushall.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/flushall.md b/commands/flushall.md index 5d2dffaa12..c5f3cef4ac 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -7,7 +7,7 @@ keys in all existing databases. FLUSHALL ASYNC (Redis 4.0.0 or greater) --- -Code name “lazy freeing of objects”, but it’s a lame name for a neat feature. There is a new command called `UNLINK` that just deletes a key reference in the database, and does the actual clean up of the allocations in a separated thread, so if you use `UNLINK` instead of `DEL` against a huge key the server will not block. And even better with the ASYNC options of `FLUSHALL` and `FLUSHDB` you can do that for whole DBs or for all the data inside the instance, if you want. Combined with the new `SWAPDB` command, that swaps two Redis databases content, `FLUSHDB ASYNC` can be quite interesting. Once you, for instance, populated DB 1 with the new version of the data, you can `SWAPDB 0 1` and `FLUSHDB ASYNC` the database with the old data, and create yet a newer version and reiterate. 
This is only possible now because flushing a whole DB is no longer blocking. +Code name "lazy freeing of objects", but it's a lame name for a neat feature. There is a new command called `UNLINK` that just deletes a key reference in the database, and does the actual clean up of the allocations in a separated thread, so if you use `UNLINK` instead of `DEL` against a huge key the server will not block. And even better with the ASYNC options of `FLUSHALL` and `FLUSHDB` you can do that for whole DBs or for all the data inside the instance, if you want. Combined with the new `SWAPDB` command, that swaps two Redis databases content, FLUSHDB ASYNC can be quite interesting. Once you, for instance, populated DB 1 with the new version of the data, you can SWAPDB 0 1 and FLUSHDB ASYNC the database with the old data, and create yet a newer version and reiterate. This is only possible now because flushing a whole DB is no longer blocking. @return From bb7a37583b41e42f08b82522214a04441c0c4692 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Mon, 26 Dec 2016 11:12:01 +0000 Subject: [PATCH 0690/2314] Added ASYNC param for FLUSHALL & FLUSHDB --- commands/flushall.md | 7 +++++-- commands/flushdb.md | 2 +- wordlist | 1 + 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/commands/flushall.md b/commands/flushall.md index c5f3cef4ac..83beb43ca2 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -5,9 +5,12 @@ This command never fails. The time-complexity for this operation is O(N), N being the number of keys in all existing databases. -FLUSHALL ASYNC (Redis 4.0.0 or greater) +`FLUSHALL` ASYNC (Redis 4.0.0 or greater) --- -Code name "lazy freeing of objects", but it's a lame name for a neat feature. There is a new command called `UNLINK` that just deletes a key reference in the database, and does the actual clean up of the allocations in a separated thread, so if you use `UNLINK` instead of `DEL` against a huge key the server will not block. 
And even better with the ASYNC options of `FLUSHALL` and `FLUSHDB` you can do that for whole DBs or for all the data inside the instance, if you want. Combined with the new `SWAPDB` command, that swaps two Redis databases content, FLUSHDB ASYNC can be quite interesting. Once you, for instance, populated DB 1 with the new version of the data, you can SWAPDB 0 1 and FLUSHDB ASYNC the database with the old data, and create yet a newer version and reiterate. This is only possible now because flushing a whole DB is no longer blocking. +Code name "lazy freeing of objects", but it's a lame name for a neat feature. +There is a new command called `UNLINK` that just deletes a key reference in the database, and does the actual clean up of the allocations in a separated thread, so if you use `UNLINK` instead of `DEL` against a huge key the server will not block. +And even better with the ASYNC options of `FLUSHALL` and `FLUSHDB` you can do that for whole dbs or for all the data inside the instance, if you want. Combined with the new `SWAPDB` command, that swaps two Redis databases content, `FLUSHDB` ASYNC can be quite interesting. +Once you, for instance, populated db 1 with the new version of the data, you can `SWAPDB` 0 1 and `FLUSHDB` ASYNC the database with the old data, and create yet a newer version and reiterate. This is only possible now because flushing a whole db is no longer blocking. @return diff --git a/commands/flushdb.md b/commands/flushdb.md index af596f4d8f..f7a3c9343b 100644 --- a/commands/flushdb.md +++ b/commands/flushdb.md @@ -4,7 +4,7 @@ This command never fails. The time-complexity for this operation is O(N), N being the number of keys in the database. -FLUSHDB ASYNC (Redis 4.0.0 or greater) +`FLUSHDB` ASYNC (Redis 4.0.0 or greater) --- See `FLUSHALL` for documentation. 
diff --git a/wordlist b/wordlist index d5eb0604c0..780842887d 100644 --- a/wordlist +++ b/wordlist @@ -1,3 +1,4 @@ +ASYNC ACLs AMD AOF From 97329dec4afad511e4c6a69654580d4b1520a052 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Mon, 26 Dec 2016 11:17:00 +0000 Subject: [PATCH 0691/2314] Added ASYNC param for FLUSHALL & FLUSHDB --- wordlist | 1 + 1 file changed, 1 insertion(+) diff --git a/wordlist b/wordlist index 780842887d..958a0a4b29 100644 --- a/wordlist +++ b/wordlist @@ -184,6 +184,7 @@ cpu cron dataset datasets +dbs decrement decrementing denyoom From 12a2263b7ba3efc8d721e83e37cb656cea8c6b7f Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Mon, 26 Dec 2016 18:25:11 +0000 Subject: [PATCH 0692/2314] Added SWAPDB command (#784) * Added SWAPDB command * Added SWAPDB command --- commands.json | 15 +++++++++++++++ commands/swapdb.md | 17 +++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 commands/swapdb.md diff --git a/commands.json b/commands.json index a188cd177c..e57e967701 100644 --- a/commands.json +++ b/commands.json @@ -2505,6 +2505,21 @@ "since": "1.0.0", "group": "set" }, + "SWAPDB": { + "summary": "Swaps two Redis databases", + "arguments": [ + { + "name": "index", + "type": "integer" + }, + { + "name": "index", + "type": "integer" + } + ], + "since": "4.0.0", + "group": "connection" + }, "SYNC": { "summary": "Internal command used for replication", "since": "1.0.0", diff --git a/commands/swapdb.md b/commands/swapdb.md new file mode 100644 index 0000000000..1613316f44 --- /dev/null +++ b/commands/swapdb.md @@ -0,0 +1,17 @@ +This command swaps two Redis databases, so that immediately all the +clients connected to a given database will see the data of the other database, and +the other way around. Example: + + SWAPDB 0 1 + +This will swap database 0 with database 1. 
All the clients connected with database 0 will immediately see the new data, exactly like all the clients connected with database 1 will see the data that was formerly of database 0. + +@return + +@simple-string-reply: `OK` if `SWAPDB` was executed correctly. + +@examples + +```cli +SWAPDB 0 1 +``` From f42dc1e9214d73485e55da79fb20eecbab9d85b1 Mon Sep 17 00:00:00 2001 From: Alexander Cheprasov Date: Mon, 26 Dec 2016 18:38:57 +0000 Subject: [PATCH 0693/2314] Added ASYNC param for FLUSHALL & FLUSHDB --- commands/flushall.md | 8 +++----- commands/flushdb.md | 2 +- wordlist | 1 - 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/commands/flushall.md b/commands/flushall.md index 83beb43ca2..b31e0b51a8 100644 --- a/commands/flushall.md +++ b/commands/flushall.md @@ -5,12 +5,10 @@ This command never fails. The time-complexity for this operation is O(N), N being the number of keys in all existing databases. -`FLUSHALL` ASYNC (Redis 4.0.0 or greater) +`FLUSHALL ASYNC` (Redis 4.0.0 or greater) --- -Code name "lazy freeing of objects", but it's a lame name for a neat feature. -There is a new command called `UNLINK` that just deletes a key reference in the database, and does the actual clean up of the allocations in a separated thread, so if you use `UNLINK` instead of `DEL` against a huge key the server will not block. -And even better with the ASYNC options of `FLUSHALL` and `FLUSHDB` you can do that for whole dbs or for all the data inside the instance, if you want. Combined with the new `SWAPDB` command, that swaps two Redis databases content, `FLUSHDB` ASYNC can be quite interesting. -Once you, for instance, populated db 1 with the new version of the data, you can `SWAPDB` 0 1 and `FLUSHDB` ASYNC the database with the old data, and create yet a newer version and reiterate. This is only possible now because flushing a whole db is no longer blocking. +Redis is now able to delete keys in the background in a different thread without blocking the server. 
+An `ASYNC` option was added to `FLUSHALL` and `FLUSHDB` in order to let the entire dataset or a single database to be freed asynchronously. @return diff --git a/commands/flushdb.md b/commands/flushdb.md index f7a3c9343b..fe5e8731c5 100644 --- a/commands/flushdb.md +++ b/commands/flushdb.md @@ -4,7 +4,7 @@ This command never fails. The time-complexity for this operation is O(N), N being the number of keys in the database. -`FLUSHDB` ASYNC (Redis 4.0.0 or greater) +`FLUSHDB ASYNC` (Redis 4.0.0 or greater) --- See `FLUSHALL` for documentation. diff --git a/wordlist b/wordlist index 958a0a4b29..780842887d 100644 --- a/wordlist +++ b/wordlist @@ -184,7 +184,6 @@ cpu cron dataset datasets -dbs decrement decrementing denyoom From 753d339fb6529a5ed7aa3e1b586c3446fee82cde Mon Sep 17 00:00:00 2001 From: Luis Ashurei Date: Wed, 4 Jan 2017 18:22:01 +0800 Subject: [PATCH 0694/2314] Fix typo (#788) --- topics/replication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/replication.md b/topics/replication.md index 5ee2cd540f..053ac4a1ff 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -156,7 +156,7 @@ slaves are able to evict keys with TTL as masters do, with the exceptions of keys written in DB numbers greater than 63 (but by default Redis instances only have 16 databases). -Also note that since Redis 4.0 slave writes are only local, and are not propoagated to sub-slaves attached to the instance. Sub slaves instead will always receive the replication stream identical to the one sent by the top-level master to the intermediate slaves. So for example in the following setup: +Also note that since Redis 4.0 slave writes are only local, and are not propagated to sub-slaves attached to the instance. Sub slaves instead will always receive the replication stream identical to the one sent by the top-level master to the intermediate slaves. 
So for example in the following setup: A ---> B ---> C From 31a88df345042aeac130aae65a10b0445e5bb71c Mon Sep 17 00:00:00 2001 From: Konstantin Shabanov Date: Tue, 31 Jan 2017 01:00:58 +0700 Subject: [PATCH 0695/2314] Add oxblood client (#791) --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index bae631a797..842c267295 100644 --- a/clients.json +++ b/clients.json @@ -1436,5 +1436,14 @@ "authors": ["diguo58"], "recommended": true, "active": true + }, + + { + "name": "oxblood", + "language": "Ruby", + "repository": "https://github.com/etehtsea/oxblood", + "description": "Straightforward Ruby client.", + "authors": ["etehtsea"], + "active": true } ] From 3e9140eef5842c6d7e1c7b72d7596d876e0e5d0e Mon Sep 17 00:00:00 2001 From: Ken Reese Date: Tue, 31 Jan 2017 01:20:35 -0700 Subject: [PATCH 0696/2314] Updated link to scredis repo (#793) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 842c267295..ae51d15269 100644 --- a/clients.json +++ b/clients.json @@ -570,7 +570,7 @@ { "name": "scredis", "language": "Scala", - "repository": "https://github.com/Livestream/scredis", + "repository": "https://github.com/scredis/scredis", "description": "Non-blocking, ultra-fast Scala Redis client built on top of Akka IO, used in production at Livestream", "authors": ["livestream"], "active": true From cf4c556f0e0a7ded71840fe24177a3aea50afc92 Mon Sep 17 00:00:00 2001 From: Christos Date: Sat, 4 Feb 2017 12:47:21 +0200 Subject: [PATCH 0697/2314] Fixed typo (#794) --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index cfb0e3d328..1fa1761e22 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -470,7 +470,7 @@ Examples of rule 1: > lpush mylist 1 2 3 (integer) 3 -However we can't perform operations 
against the wrong type of the key exists: +However we can't perform operations against the wrong type if the key exists: > set foo bar OK From ebf4924e4ee82b65d6f0c5e89df854446d273008 Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Fri, 10 Feb 2017 06:24:47 -0800 Subject: [PATCH 0698/2314] Fix cluster spec doc: PFAIL -> FAIL (#790) --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 6b06e5d03a..c53f4f512b 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -735,7 +735,7 @@ A `PFAIL` condition is escalated to a `FAIL` condition when the following set of * Some node, that we'll call A, has another node B flagged as `PFAIL`. * Node A collected, via gossip sections, information about the state of B from the point of view of the majority of masters in the cluster. -* The majority of masters signaled the `PFAIL` or `PFAIL` condition within `NODE_TIMEOUT * FAIL_REPORT_VALIDITY_MULT` time. (The validity factor is set to 2 in the current implementation, so this is just two times the `NODE_TIMEOUT` time). +* The majority of masters signaled the `PFAIL` or `FAIL` condition within `NODE_TIMEOUT * FAIL_REPORT_VALIDITY_MULT` time. (The validity factor is set to 2 in the current implementation, so this is just two times the `NODE_TIMEOUT` time). If all the above conditions are true, Node A will: From e209128a0df40b310c1fb1957193a28b54aaebfa Mon Sep 17 00:00:00 2001 From: Louis-Michel Couture Date: Fri, 10 Feb 2017 12:15:14 -0500 Subject: [PATCH 0699/2314] Remove outdated references in quickstart example (#797) --- topics/quickstart.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/quickstart.md b/topics/quickstart.md index 166abae151..1d915c0840 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -112,8 +112,8 @@ download and install a Redis client library for your programming language. 
You'll find a [full list of clients for different languages in this page](http://redis.io/clients). For instance if you happen to use the Ruby programming language our best advice -is to use the [Redis-rb](http://github.com/ezmobius/redis-rb) client. -You can install it using the command **gem install redis** (also make sure to install the **SystemTimer** gem as well). +is to use the [Redis-rb](https://github.com/redis/redis-rb) client. +You can install it using the command **gem install redis**. These instructions are Ruby specific but actually many library clients for popular languages look quite similar: you create a Redis object and execute From d1aeb9d49c99703197994cd2ec65befc74c6d639 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Thu, 16 Feb 2017 13:33:12 +0100 Subject: [PATCH 0700/2314] =?UTF-8?q?Tix=20typo:=20Replicte=20=E2=86=92=20?= =?UTF-8?q?Replicate?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- commands/eval.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 2d25d55361..4023aaab38 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -524,9 +524,9 @@ an error if called when script effects replication is disabled. The command can be called with four different arguments: - redis.set_repl(redis.REPL_ALL) -- Replicte to AOF and slaves. - redis.set_repl(redis.REPL_AOF) -- Replicte only to AOF. - redis.set_repl(redis.REPL_SLAVE) -- Replicte only to slaves. + redis.set_repl(redis.REPL_ALL) -- Replicate to AOF and slaves. + redis.set_repl(redis.REPL_AOF) -- Replicate only to AOF. + redis.set_repl(redis.REPL_SLAVE) -- Replicate only to slaves. redis.set_repl(redis.REPL_NONE) -- Don't replicate at all. By default the scripting engine is always set to `REPL_ALL`. 
By calling From d358f351f7e49cc047c4f846cc7369b137c0364f Mon Sep 17 00:00:00 2001 From: SeYoungLee Date: Wed, 22 Feb 2017 16:01:22 +0900 Subject: [PATCH 0701/2314] Update clients.json add PL/SQL client library oredis --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index ae51d15269..5e1728f27f 100644 --- a/clients.json +++ b/clients.json @@ -1445,5 +1445,15 @@ "description": "Straightforward Ruby client.", "authors": ["etehtsea"], "active": true + }, + + { + "name": "oredis", + "language": "PL/SQL", + "repository": "https://github.com/SeYoungLee/oredis", + "description": "Redis client library for Oracle PL/SQL. This support Redis cluster and asynchronous execution", + "authors": ["SeyoungLee"], + "recommended": true, + "active": true } ] From 90dbe4b2fbde9ff1aec49c4b00004c83528f9ceb Mon Sep 17 00:00:00 2001 From: SeYoungLee Date: Wed, 22 Feb 2017 17:48:41 +0900 Subject: [PATCH 0702/2314] Update clients.json remove recommended section --- clients.json | 1 - 1 file changed, 1 deletion(-) diff --git a/clients.json b/clients.json index 5e1728f27f..263d71a0da 100644 --- a/clients.json +++ b/clients.json @@ -1453,7 +1453,6 @@ "repository": "https://github.com/SeYoungLee/oredis", "description": "Redis client library for Oracle PL/SQL. 
This support Redis cluster and asynchronous execution", "authors": ["SeyoungLee"], - "recommended": true, "active": true } ] From 7672f3a9311c25300033b73a626bcab3bc43d60c Mon Sep 17 00:00:00 2001 From: SeYoungLee Date: Wed, 22 Feb 2017 18:14:00 +0900 Subject: [PATCH 0703/2314] Update clients.json change the first brace position --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 263d71a0da..74eadb1ebf 100644 --- a/clients.json +++ b/clients.json @@ -1447,7 +1447,7 @@ "active": true }, - { + { "name": "oredis", "language": "PL/SQL", "repository": "https://github.com/SeYoungLee/oredis", From 0c37dc4487cdc191ae8ec2e9b82e16798e434cdc Mon Sep 17 00:00:00 2001 From: Brad Urani Date: Wed, 22 Feb 2017 02:22:13 -0800 Subject: [PATCH 0704/2314] Fixing grammatical error (#799) --- topics/indexes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/indexes.md b/topics/indexes.md index ae2c4b1c6f..0faaf772d5 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -24,7 +24,7 @@ elements ordered by a floating point number which is the *score* of each element. Elements are ordered from the smallest to the highest score. Since the score is a double precision float, indexes you can build with -vanilla sorted sets are limited to things were the indexing field is a number +vanilla sorted sets are limited to things where the indexing field is a number within a given range. The two commands to build these kind of indexes are `ZADD` and From 80bfc2c174c66400ca2f229ad908c47775b8da5c Mon Sep 17 00:00:00 2001 From: Glen Arrowsmith Date: Wed, 22 Feb 2017 20:24:18 +1000 Subject: [PATCH 0705/2314] Added pronunciation (#781) At least this is how I've read its pronounced. 
--- topics/faq.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/topics/faq.md b/topics/faq.md index 2ac0d9bf97..db4cb14260 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -152,3 +152,7 @@ It means REmote DIctionary Server. Originally Redis was started in order to scale [LLOOGG][lloogg]. But after I got the basic server working I liked the idea to share the work with other people, and Redis was turned into an open source project. [lloogg]: http://lloogg.com + +## How is Redis pronounced? + +It's "red" like the color, then "iss". From a1f9a93e0b4dcf76b3b6780f12ee4e98467b5cf6 Mon Sep 17 00:00:00 2001 From: Asmod4n Date: Mon, 27 Feb 2017 21:13:43 +0100 Subject: [PATCH 0706/2314] Add mruby-hiredis Redis Client for mruby with Async support, pipelines and transactions --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index 74eadb1ebf..6c2506f72e 100644 --- a/clients.json +++ b/clients.json @@ -1330,6 +1330,15 @@ "authors": ["matsumotory"], "active": true }, + + { + "name": "mruby-hiredis", + "language": "mruby", + "repository": "https://github.com/Asmod4n/mruby-hiredis", + "description": "Redis Client for mruby with Async support, pipelines and transactions", + "authors": ["Asmod4n"], + "active": true + }, { "name": "Pottery", From 1623f33dd2bbe2f3fcdb9ec7f0eb762176770ad8 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 13 Mar 2017 13:42:13 +0200 Subject: [PATCH 0707/2314] Clarifies that Lua data types are converted to Redis protocol when calling a command (#804) --- commands/eval.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 4023aaab38..e77cc71d0b 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -74,10 +74,10 @@ protocol using a set of conversion rules. ## Conversion between Lua and Redis data types Redis return values are converted into Lua data types when Lua calls a Redis -command using call() or pcall(). 
-Similarly Lua data types are converted into the Redis protocol when a Lua script -returns a value, so that scripts can control what `EVAL` will return to the -client. +command using `call()` or `pcall()`. +Similarly, Lua data types are converted into the Redis protocol when calling +a Redis command and when a Lua script returns a value, so that scripts can +control what `EVAL` will return to the client. This conversion between data types is designed in a way that if a Redis type is converted into a Lua type, and then the result is converted back into a Redis From ea17db8d7e7eb0e71d61cb69de1251c9451656c2 Mon Sep 17 00:00:00 2001 From: Eric Silverberg Date: Mon, 13 Mar 2017 16:57:19 -0400 Subject: [PATCH 0708/2314] Update clients.json Include my new PSSRedisClient project as an option for Swift. Also marks one of the swift clients as no longer actively maintained (not updated in 2+ yrs) --- clients.json | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 74eadb1ebf..0a2c6a071f 100644 --- a/clients.json +++ b/clients.json @@ -1247,7 +1247,7 @@ "repository": "https://github.com/Farhaddc/Swidis", "description": "iOS Framework Allowing you to connect to Redis server with Swift programming language.", "authors": ["Farhaddc"], - "active": true + "active": false }, { @@ -1277,6 +1277,15 @@ "active": true }, + { + "name": "PSSRedisClient", + "language": "Swift", + "repository": "https://github.com/perrystreetsoftware/PSSRedisClient", + "description": "Swift redis client using the CocoaAsyncSocket library, installable via Cocoapods", + "authors": ["esilverberg"], + "active": true + }, + { "name": "Rackdis", "language": "Racket", From 0e969fdc9c7217c407d3b4f4429199842c1814d4 Mon Sep 17 00:00:00 2001 From: joanvila Date: Sun, 19 Mar 2017 10:55:15 +0100 Subject: [PATCH 0709/2314] Added asyncio python implementation to distlock implementation list --- topics/distlock.md | 1 + 1 file changed, 
1 insertion(+) diff --git a/topics/distlock.md b/topics/distlock.md index 34e837e9ee..3088025174 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -25,6 +25,7 @@ already available that can be used for reference. * [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution and perhaps more. * [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). +* [Aioredlock](https://github.com/joanvila/aioredlock) (Asyncio Python implementation). * [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). * [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation) * [cheprasov/php-redis-lock](https://github.com/cheprasov/php-redis-lock) (PHP library for locks) From 7c7ce77b8e29377a063e096d0780081d3a197fb1 Mon Sep 17 00:00:00 2001 From: Jose Tiago Macara Coutinho Date: Sun, 19 Mar 2017 16:09:21 +0100 Subject: [PATCH 0710/2314] Add QRedis --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 1808d07be0..1fa42e22a8 100644 --- a/tools.json +++ b/tools.json @@ -304,6 +304,13 @@ "description": "Cross-platform desktop GUI management tool for Redis", "authors": ["u_glide"] }, + { + "name": "QRedis", + "language": "Python", + "repository": "https://github.com/tiagocoutinho/qredis", + "description": "Python, Qt based redis GUI", + "authors": ["tiagocoutinho"] + }, { "name": "FastoRedis", "language": "C++", From fe7853a014c01f8f3c999be793353a179c7016c6 Mon Sep 17 00:00:00 2001 From: Chris Tanner Date: Tue, 21 Mar 2017 23:35:34 +0000 Subject: [PATCH 0711/2314] data types grammar --- topics/data-types-intro.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/topics/data-types-intro.md 
b/topics/data-types-intro.md index cfb0e3d328..61730b51c2 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -1,7 +1,7 @@ An introduction to Redis data types and abstractions === -Redis is not a *plain* key-value store, actually it is a *data structures server*, supporting different kind of values. What this means is that, while in +Redis is not a *plain* key-value store, it is actually a *data structures server*, supporting different kinds of values. What this means is that, while in traditional key-value stores you associated string keys to string values, in Redis the value is not limited to a simple string, but can also hold more complex data structures. The following is the list of all the data structures supported @@ -989,13 +989,13 @@ proportional to the number of items you want to count, because you need to remember the elements you have already seen in the past in order to avoid counting them multiple times. However there is a set of algorithms that trade memory for precision: you end with an estimated measure with a standard error, -in the case of the Redis implementation, which is less than 1%. The +which in the case of the Redis implementation is less than 1%. The magic of this algorithm is that you no longer need to use an amount of memory proportional to the number of items counted, and instead can use a constant amount of memory! 12k bytes in the worst case, or a lot less if your HyperLogLog (We'll just call them HLL from now) has seen very few elements. -HLLs in Redis, while technically a different data structure, is encoded +HLLs in Redis, while technically a different data structure, are encoded as a Redis string, so you can call `GET` to serialize a HLL, and `SET` to deserialize it back to the server. 
@@ -1029,7 +1029,7 @@ There are other important things in the Redis API that can't be explored in the context of this document, but are worth your attention: * It is possible to [iterate the key space of a large collection incrementally](/commands/scan). -* It is possible to run [Lua scripts server side](/commands/eval) to win latency and bandwidth. +* It is possible to run [Lua scripts server side](/commands/eval) to improve latency and bandwidth. * Redis is also a [Pub-Sub server](/topics/pubsub). Learn more From bcf5f720094a9019c7c3b11270339e6bee4c9eae Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 22 Mar 2017 10:24:35 +0100 Subject: [PATCH 0712/2314] Redis on ARM page. --- topics/ARM.md | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 topics/ARM.md diff --git a/topics/ARM.md b/topics/ARM.md new file mode 100644 index 0000000000..5aba99a47c --- /dev/null +++ b/topics/ARM.md @@ -0,0 +1,66 @@ +# Redis on ARM + +Since the Redis 4.0 version (currently in release candidate state) Redis +supports the ARM processor, and the Raspberry Pi, as a main +platform, exactly like it happens for Linux/x86. It means that every new +release of Redis is tested on the Pi environment, and that we take +this documentation page updated with information about supported devices +and information. While Redis already runs on Android, in the future we look +forward to extend our testing efforts to Android to also make it an officially +supported platform. + +We believe that Redis is ideal for IoT and Embedded devices for several +reasons: + +* Redis has a very small memory footprint and CPU requirements. Can run in small devices like the Raspberry Pi Zero without impacting the overall performances, using a small amount of memory, while delivering good performances for many use cases. +* The data structures of Redis are often a good way to model IoT/embedded use cases. 
For example in order to accumulate time series data, to receive or queue commands to execute or responses to send back to the remote servers and so forth. +* Modeling data inside Redis can be very useful in order to make in-device decisions for appliances that must respond very quickly or when the remote servers are offline. +* Redis can be used as an interprocess communication system between the processes running in the device. +* The append only file storage of Redis is well suited for the SSD cards. + +## Redis /proc/cpu/alignment requirements + +Linux on ARM allows to trap unaligned accesses and fix them inside the kernel +in order to continue the exeuction of the offending program instead of +generating a SIGBUS. Redis 4.0 and greater are fixed in order to avoid any kind +of unaligned access, so there is no need to have a specific value for this +kernel configuration. Even when kernel alignment fixing is disabled Redis should +run as expected. + +## Building Redis in the Pi + +* Grab the latest commint of the Redis 4.0 branch. +* Just use `make` as usually to create the executable. + +There is nothing special in the process. The only difference is that by +default, Redis uses the libc allocator instead of defaulting to Jemalloc +as it does in other Linux based environments. This is because we believe +that for the small use cases inside embeddeed devices, memory fragmentation +is unlikely to be a problem. Moreover Jemalloc on ARM may not be as tested +as the libc allocator. + +## Performances + +Performance testing of Redis was performend in the Raspberry Pi 3 and in the +original model B Pi. The difference between the two Pis in terms of +delivered performances is quite big. The benchmarks were performed via the +loopback interface, since most use cases will probably use Redis from within +the device and not via the network. + +Raspberry Pi 3: + +* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 
28000 ops/sec. +* Test 2: Like test 1 but with pipelining using groups of 8 operations: 80000 ops/sec. +* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 23000 ops/sec +* Test 4: Like test 3, but with an AOF rewrite in progress: 21000 ops/sec + +Raspberry Pi 1 model B: + +* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 2200 ops/sec. +* Test 2: Like test 1 but with pipelining using groups of 8 operations: 8500 ops/sec. +* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 1820 ops/sec +* Test 4: Like test 3, but with an AOF rewrite in progress: 1000 ops/sec + +The benchmarks above are referring to simple SET/GET operations. The performances are similar for all the Redis fast operations (not running in linear time). However sorted sets may show slightly slow numbers. + + From 31a800294795a77c3c67677493ef2d53d90a8bc2 Mon Sep 17 00:00:00 2001 From: antirez Date: Wed, 22 Mar 2017 10:45:15 +0100 Subject: [PATCH 0713/2314] ARM page typos fixed. --- topics/ARM.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/ARM.md b/topics/ARM.md index 5aba99a47c..10bf228b64 100644 --- a/topics/ARM.md +++ b/topics/ARM.md @@ -12,7 +12,7 @@ supported platform. We believe that Redis is ideal for IoT and Embedded devices for several reasons: -* Redis has a very small memory footprint and CPU requirements. Can run in small devices like the Raspberry Pi Zero without impacting the overall performances, using a small amount of memory, while delivering good performances for many use cases. +* Redis has a very small memory footprint and CPU requirements. Can run in small devices like the Raspberry Pi Zero without impacting the overall performance, using a small amount of memory, while delivering good performance for many use cases. * The data structures of Redis are often a good way to model IoT/embedded use cases. 
For example in order to accumulate time series data, to receive or queue commands to execute or responses to send back to the remote servers and so forth. * Modeling data inside Redis can be very useful in order to make in-device decisions for appliances that must respond very quickly or when the remote servers are offline. * Redis can be used as an interprocess communication system between the processes running in the device. @@ -29,7 +29,7 @@ run as expected. ## Building Redis in the Pi -* Grab the latest commint of the Redis 4.0 branch. +* Grab the latest commit of the Redis 4.0 branch. * Just use `make` as usually to create the executable. There is nothing special in the process. The only difference is that by @@ -39,11 +39,11 @@ that for the small use cases inside embeddeed devices, memory fragmentation is unlikely to be a problem. Moreover Jemalloc on ARM may not be as tested as the libc allocator. -## Performances +## Performance Performance testing of Redis was performend in the Raspberry Pi 3 and in the original model B Pi. The difference between the two Pis in terms of -delivered performances is quite big. The benchmarks were performed via the +delivered performance is quite big. The benchmarks were performed via the loopback interface, since most use cases will probably use Redis from within the device and not via the network. @@ -61,6 +61,6 @@ Raspberry Pi 1 model B: * Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 1820 ops/sec * Test 4: Like test 3, but with an AOF rewrite in progress: 1000 ops/sec -The benchmarks above are referring to simple SET/GET operations. The performances are similar for all the Redis fast operations (not running in linear time). However sorted sets may show slightly slow numbers. +The benchmarks above are referring to simple SET/GET operations. The performance is similar for all the Redis fast operations (not running in linear time). However sorted sets may show slightly slow numbers. 
From 026a5b9d37b478a9650833d9991162e564acaadc Mon Sep 17 00:00:00 2001 From: "Kyle J. Davis" Date: Wed, 22 Mar 2017 08:35:00 -0400 Subject: [PATCH 0714/2314] Added commas in ops/sec The added commas make it more clear the performance difference between Pi 3 and Pi 1/B. I had to look a few times to see that Test 1 was x10+ on the pi3 because it was just zeros. --- topics/ARM.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/topics/ARM.md b/topics/ARM.md index 10bf228b64..ca9895956d 100644 --- a/topics/ARM.md +++ b/topics/ARM.md @@ -49,17 +49,17 @@ the device and not via the network. Raspberry Pi 3: -* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 28000 ops/sec. -* Test 2: Like test 1 but with pipelining using groups of 8 operations: 80000 ops/sec. -* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 23000 ops/sec -* Test 4: Like test 3, but with an AOF rewrite in progress: 21000 ops/sec +* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 28,000 ops/sec. +* Test 2: Like test 1 but with pipelining using groups of 8 operations: 80,000 ops/sec. +* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 23,000 ops/sec +* Test 4: Like test 3, but with an AOF rewrite in progress: 21,000 ops/sec Raspberry Pi 1 model B: -* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 2200 ops/sec. -* Test 2: Like test 1 but with pipelining using groups of 8 operations: 8500 ops/sec. -* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 1820 ops/sec -* Test 4: Like test 3, but with an AOF rewrite in progress: 1000 ops/sec +* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 2,200 ops/sec. +* Test 2: Like test 1 but with pipelining using groups of 8 operations: 8,500 ops/sec. 
+* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 1,820 ops/sec +* Test 4: Like test 3, but with an AOF rewrite in progress: 1,000 ops/sec The benchmarks above are referring to simple SET/GET operations. The performance is similar for all the Redis fast operations (not running in linear time). However sorted sets may show slightly slow numbers. From 53a9721f98d383246e7c903478b5cfb2333ec296 Mon Sep 17 00:00:00 2001 From: Chris Tanner Date: Wed, 22 Mar 2017 18:58:24 +0000 Subject: [PATCH 0715/2314] debugging.md grammar /readability changes --- topics/debugging.md | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/topics/debugging.md b/topics/debugging.md index dbfacd0f66..d3e02462a9 100644 --- a/topics/debugging.md +++ b/topics/debugging.md @@ -4,10 +4,10 @@ Redis debugging guide Redis is developed with a great stress on stability: we do our best with every release to make sure you'll experience a very stable product and no crashes. However even with our best efforts it is impossible to avoid all -the critical bugs with 100% of success. +the critical bugs with 100% success. When Redis crashes it produces a detailed report of what happened, however -sometimes looking at the crash report is not enough, nor it is possible for +sometimes looking at the crash report is not enough, nor is it possible for the Redis core team to reproduce the issue independently: in this scenario we need help from the user that is able to reproduce the issue. @@ -27,8 +27,10 @@ GDB can be used in two ways: + It can attach to a running program and inspect the state of it at runtime. + It can inspect the state of a program that already terminated using what is called a *core file*, that is, the image of the memory at the time the program was running. 
-From the point of view of investigating Redis bugs we need to use both this -GDB modes: the user able to reproduce the bug attaches GDB to his running Redis instance, and when the crash happens, he creates the `core` file that the in turn the developer will use to inspect the Redis internals at the time of the crash. +From the point of view of investigating Redis bugs we need to use both of these +GDB modes: the user able to reproduce the bug attaches GDB to his running Redis +instance, and when the crash happens, he creates the `core` file that the in +turn the developer will use to inspect the Redis internals at the time of the crash. This way the developer can perform all the inspections in his computer without the help of the user, and the user is free to restart Redis in the production environment. @@ -49,7 +51,7 @@ compiled with optimizations. It is great if you make sure to recompile Redis with `make noopt` after the first crash, so that the next time it will be simpler to track the issue. -You should not be concerned with the loss of performances compiling Redis +You should not be concerned with the loss of performance compiling Redis without optimizations, it is very unlikely that this will cause problems in your environment since it is usually just a matter of a small percentage because Redis is not very CPU-bound (it does a lot of I/O to serve queries). @@ -58,10 +60,10 @@ Attaching GDB to a running process ---------------------------------- If you have an already running Redis server, you can attach GDB to it, so that -if Redis will crash it will be possible to both inspect the internals and +if Redis crashes it will be possible to both inspect the internals and generate a `core dump` file. -After you attach GDB to the Redis process it will continue running as usually without any loss of performance, so this is not a dangerous procedure. 
+After you attach GDB to the Redis process it will continue running as usual without any loss of performance, so this is not a dangerous procedure. In order to attach GDB the first thing you need is the *process ID* of the running Redis instance (the *pid* of the process). You can easily obtain it using `redis-cli`: @@ -165,9 +167,16 @@ The next step is to generate the core dump, that is the image of the memory of t (gdb) gcore Saved corefile core.58414 -Now you have the core dump to send to the Redis developer, but **it is important to understand** that this happens to contain all the data that was inside the Redis instance at the time of the crash: Redis developers will make sure to don't share the content with any other, and will delete the file as soon as it is no longer used for debugging purposes, but you are warned that sending the core file you are sending your data. +Now you have the core dump to send to the Redis developer, but **it is important +to understand** that this happens to contain all the data that was inside the +Redis instance at the time of the crash; Redis developers will make sure not to +share the content with anyone else, and will delete the file as soon as it is no +longer used for debugging purposes, but you are warned that by sending the core +file you are sending your data. -If there are sensible stuff in the data set we suggest sending the dump directly to Salvatore Sanfilippo (that is the guy writing this doc) at the email address **antirez at gmail dot com**. +If there are sensible stuff in the data set we suggest sending the dump directly +to Salvatore Sanfilippo (that is the guy writing this doc) at the email address +**antirez at gmail dot com**. What to send to developers -------------------------- @@ -182,4 +191,6 @@ Finally you can send everything to the Redis core team: Thank you --------- -Your help is extremely important! Many issues can only be tracked this way, thanks! 
It is also possible that helping Redis debugging you'll be among the winners of the next [Redis Moka Award](http://antirez.com/post/redis-moka-awards-2011.html). +Your help is extremely important! Many issues can only be tracked this way, +thanks! It is also possible that helping Redis debugging you'll be among the +winners of the next [Redis Moka Award](http://antirez.com/post/redis-moka-awards-2011.html). From 4604b56164a6dcc01974d7b014c5706f207cfa84 Mon Sep 17 00:00:00 2001 From: Chris Tanner Date: Wed, 22 Mar 2017 19:36:44 +0000 Subject: [PATCH 0716/2314] faq.md grammar/spelling --- topics/distlock.md | 2 +- topics/encryption.md | 2 +- topics/faq.md | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/topics/distlock.md b/topics/distlock.md index b3f05e7613..ce8be54df6 100644 --- a/topics/distlock.md +++ b/topics/distlock.md @@ -42,7 +42,7 @@ Safety and Liveness guarantees We are going to model our design with just three properties that, from our point of view, are the minimum guarantees needed to use distributed locks in an effective way. 1. Safety property: Mutual exclusion. At any given moment, only one client can hold a lock. -2. Liveness property A: Deadlock free. Eventually it is always possible to acquire a lock, even if the client that locked a resource crashed or gets partitioned. +2. Liveness property A: Deadlock free. Eventually it is always possible to acquire a lock, even if the client that locked a resource crashes or gets partitioned. 3. Liveness property B: Fault tolerance. As long as the majority of Redis nodes are up, clients are able to acquire and release locks. 
Why failover-based implementations are not enough diff --git a/topics/encryption.md b/topics/encryption.md index 5819f08c61..b1707df83c 100644 --- a/topics/encryption.md +++ b/topics/encryption.md @@ -3,7 +3,7 @@ Redis Encryption The idea of adding SSL support to Redis was proposed many times, however currently we believe that given the small percentage of users requiring -SSL support, and the fact that each scenario tends to be different, to use +SSL support, and the fact that each scenario tends to be different, using a different "tunneling" strategy can be better. We may change the idea in the future, but currently a good solution that may be suitable for many use cases is to use the following project: diff --git a/topics/faq.md b/topics/faq.md index 2ac0d9bf97..7012da3bf0 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -1,6 +1,6 @@ # FAQ -## Why Redis is different compared to other key-value stores? +## Why is Redis different compared to other key-value stores? There are two main reasons. @@ -70,7 +70,7 @@ is reached in the case you are using Redis for caching. We have documentation if you plan to use [Redis as an LRU cache](/topics/lru-cache). -## Background saving is failing with a fork() error under Linux even if I've a lot of free RAM! +## Background saving fails with a fork() error under Linux even if I have a lot of free RAM! Short answer: `echo 1 > /proc/sys/vm/overcommit_memory` :) @@ -90,10 +90,10 @@ as much free RAM as required to really duplicate all the parent memory pages, with the result that if you have a Redis dataset of 3 GB and just 2 GB of free memory it will fail. -Setting `overcommit_memory` to 1 says Linux to relax and perform the fork in a +Setting `overcommit_memory` to 1 tells Linux to relax and perform the fork in a more optimistic allocation fashion, and this is indeed what you want for Redis. 
-A good source to understand how Linux Virtual Memory work and other +A good source to understand how Linux Virtual Memory works and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. Beware, this article had `1` and `2` configuration values for `overcommit_memory` @@ -126,7 +126,7 @@ You can find more information about using multiple Redis instances in the [Parti ## What is the maximum number of keys a single Redis instance can hold? and what the max number of elements in a Hash, List, Set, Sorted Set? Redis can handle up to 2^32 keys, and was tested in practice to -handle at least 250 million of keys per instance. +handle at least 250 million keys per instance. Every hash, list, set, and sorted set, can hold 2^32 elements. @@ -143,7 +143,7 @@ If you use keys with limited time to live (Redis expires) this is normal behavio As a result of this, it is common for users with many keys with an expire set to see less keys in the slaves, because of this artifact, but there is no actual logical difference in the instances content. -## What Redis means actually? +## What does Redis actually mean? It means REmote DIctionary Server. From cee792c23c2062c8e6bf9e6b35c24d3be9a07730 Mon Sep 17 00:00:00 2001 From: Chris Tanner Date: Wed, 22 Mar 2017 20:12:08 +0000 Subject: [PATCH 0717/2314] indexes.md grammar/typo fixes --- topics/indexes.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/topics/indexes.md b/topics/indexes.md index ae2c4b1c6f..1543265309 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -315,7 +315,7 @@ One simple way do deal with this issues is to actually normalize the string the user searches. Whatever the user searches for "Banana", "BANANA" or "Ba'nana" we may always turn it into "banana". 
-However sometimes we could like to present the user with the original +However sometimes we may like to present the user with the original item typed, even if we normalize the string for indexing. In order to do this, what we do is to change the format of the index so that instead of just storing `term:frequency` we store `normalized:frequency:original` @@ -495,8 +495,8 @@ In can add 5 more entries for the same relation, but in a different order: ZADD myindex 0 pos:is-friend-of:matteocollina:antirez Now things start to be interesting, and I can query the graph in many -different ways. For example, what are all the people `antirez` -*is friend to*? +different ways. For example, who are all the people `antirez` +*is friend of*? ZRANGEBYLEX myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" 1) "spo:antirez:is-friend-of:matteocollina" @@ -512,9 +512,9 @@ the first is the subject and the second is the object? 3) "sop:antirez:matteocollina:talked-with" By combining different queries, I can ask fancy questions. For example: -*What are all my friends that, like beer, live in Barcelona, and matteocollina consider friends as well?* +*Who are all my friends that, like beer, live in Barcelona, and matteocollina consider friends as well?* To get this information I start with an `spo` query to find all the people -I'm friend with. Than for each result I get I perform an `spo` query +I'm friend with. Then for each result I get I perform an `spo` query to check if they like beer, removing the ones for which I can't find this relation. I do it again to filter by city. 
Finally I perform an `ops` query to find, of the list I obtained, who is considered friend by @@ -525,15 +525,15 @@ Make sure to check [Matteo Collina's slides about Levelgraph](http://nodejsconfi Multi dimensional indexes === -A more complex type of index is an index that allows to perform queries -where two or multiple variables are queried at the same time for specific +A more complex type of index is an index that allows you to perform queries +where two or more variables are queried at the same time for specific ranges. For example I may have a data set representing persons age and salary, and I want to retrieve all the people between 50 and 55 years old having a salary between 70000 and 85000. This query may be performed with a multi column index, but this requires us to select the first variable and then scan the second, which means we -may do a lot more work than needed. It is possible to perform this kind of +may do a lot more work than needed. It is possible to perform these kinds of queries involving multiple variables using different data structures. For example, multi-dimensional trees such as *k-d trees* or *r-trees* are sometimes used. Here we'll describe a different way to index data into @@ -549,7 +549,7 @@ where `x` is between 50 and 100, and where `y` is between 100 and 300. ![Points in the space](http://redis.io/images/redisdoc/2idx_0.png) -In order to represent data that makes this kind of queries fast to perform, +In order to represent data that makes these kinds of queries fast to perform, we start by padding our numbers with 0. So for example imagine we want to add the point 10,25 (x,y) to our index. Given that the maximum range in the example is 400 we can just pad to three digits, so we obtain: @@ -577,7 +577,7 @@ earlier by interleaving the digits, obtaining: 027050 What happens if we substitute the last two digits respectively with 00 and 99? 
-We obtain a range which is lexicographically continue: +We obtain a range which is lexicographically continuous: 027000 to 027099 @@ -631,7 +631,7 @@ And so forth. Now we have definitely better granularity! As you can see substituting N bits from the index gives us search boxes of side `2^(N/2)`. -So what we do is to check the dimension where our search box is smaller, +So what we do is check the dimension where our search box is smaller, and check the nearest power of two to this number. Our search box was 50,100 to 100,300, so it has a width of 50 and an height of 200. We take the smaller of the two, 50, and check the nearest power of two @@ -645,7 +645,7 @@ which is 50,100, and find the first range by substituting the last 6 bits in each number with 0. Then we do the same with the right top corner. With two trivial nested for loops where we increment only the significative -bits, we can find all the squares between this two. For each square we +bits, we can find all the squares between these two. For each square we convert the two numbers into our interleaved representation, and create the range using the converted representation as our start, and the same representation but with the latest 12 bits turned on as end range. @@ -687,7 +687,7 @@ Turning this into code is simple. Here is a Ruby example: While non immediately trivial this is a very useful indexing strategy that in the future may be implemented in Redis in a native way. -For now, the good thing is that the complexity may be easily incapsualted +For now, the good thing is that the complexity may be easily encapsualted inside a library that can be used in order to perform indexing and queries. One example of such library is [Redimension](https://github.com/antirez/redimension), a proof of concept Ruby library which indexes N-dimensional data inside Redis using the technique described here. 
From 632a0152f3bb7edea00deb0199b5e52cb536b175 Mon Sep 17 00:00:00 2001 From: Mike Campbell Date: Fri, 31 Mar 2017 13:29:24 +0100 Subject: [PATCH 0718/2314] Add detail about `sentinel auth-pass` directive (#812) Should explain in the "Sentinel and Redis authentication" how to set up Sentinels so that they can connect to Redis servers that use authentication. --- topics/sentinel.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/topics/sentinel.md b/topics/sentinel.md index f2b701c8fa..890a4e22c0 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -777,6 +777,12 @@ configuring in this slave only the `masterauth` directive, without using the `requirepass` directive, so that data will be readable by unauthenticated clients. +In order for sentinels to connect to Redis server instances when they are +configured with `requirepass`, the Sentinel configuration must include the +`sentinel auth-pass` directive, in the format: + + sentinel auth-pass + Sentinel clients implementation --- From 2a013bd1967109f3753b674402d2f984a3362b69 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 7 Apr 2017 15:14:18 +0200 Subject: [PATCH 0719/2314] Fixed typo in ARM page. --- topics/ARM.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/ARM.md b/topics/ARM.md index 10bf228b64..7b588d4d5a 100644 --- a/topics/ARM.md +++ b/topics/ARM.md @@ -21,7 +21,7 @@ reasons: ## Redis /proc/cpu/alignment requirements Linux on ARM allows to trap unaligned accesses and fix them inside the kernel -in order to continue the exeuction of the offending program instead of +in order to continue the execution of the offending program instead of generating a SIGBUS. Redis 4.0 and greater are fixed in order to avoid any kind of unaligned access, so there is no need to have a specific value for this kernel configuration. 
Even when kernel alignment fixing is disabled Redis should From 357931c9b5410635847834b0197a1b62d8ef1e08 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 14 Apr 2017 12:42:44 +0200 Subject: [PATCH 0720/2314] Add info about r/place Reddit project in BITFIELD command doc. --- commands/bitfield.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/bitfield.md b/commands/bitfield.md index 6e6fc28474..46b7be8e96 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -94,6 +94,8 @@ The following is an example of `OVERFLOW FAIL` returning NULL. The motivation for this command is that the ability to store many small integers as a single large bitmap (or segmented over a few keys to avoid having huge keys) is extremely memory efficient, and opens new use cases for Redis to be applied, especially in the field of real time analytics. This use cases are supported by the ability to specify the overflow in a controlled way. +Trivia: the Reddit 1st April fool project [r/place](https://reddit.com/r/place) was [built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) in order to take an in memory representation of the collaborative canvas. + ## Performance considerations Usually `BITFIELD` is a fast command, however note that addressing far bits of currently short strings will trigger an allocation that may be more costly than executing the command on bits already existing. From 223fa4ae45502612d9fb6951acd93f9024d9736b Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 14 Apr 2017 12:03:48 -0300 Subject: [PATCH 0721/2314] Minor edits to Reddit section, make build pass. --- commands/bitfield.md | 2 +- wordlist | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/commands/bitfield.md b/commands/bitfield.md index 46b7be8e96..52e6147d03 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -94,7 +94,7 @@ The following is an example of `OVERFLOW FAIL` returning NULL. 
The motivation for this command is that the ability to store many small integers as a single large bitmap (or segmented over a few keys to avoid having huge keys) is extremely memory efficient, and opens new use cases for Redis to be applied, especially in the field of real time analytics. This use cases are supported by the ability to specify the overflow in a controlled way. -Trivia: the Reddit 1st April fool project [r/place](https://reddit.com/r/place) was [built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) in order to take an in memory representation of the collaborative canvas. +Fun fact: Reddit's 2017 April fool's project [r/place](https://reddit.com/r/place) was [built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) in order to take an in-memory representation of the collaborative canvas. ## Performance considerations diff --git a/wordlist b/wordlist index 780842887d..825b8b51c6 100644 --- a/wordlist +++ b/wordlist @@ -96,6 +96,7 @@ RDDs REDISPORT REdis REmote +Reddit RSS RTT Redis From e6af4e1318852f019c85c3610f738b5a8677b464 Mon Sep 17 00:00:00 2001 From: Damian Janowski Date: Fri, 14 Apr 2017 12:08:42 -0300 Subject: [PATCH 0722/2314] Silly wordlist. --- commands/bitfield.md | 2 +- wordlist | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/bitfield.md b/commands/bitfield.md index 52e6147d03..3141be9fd5 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -94,7 +94,7 @@ The following is an example of `OVERFLOW FAIL` returning NULL. The motivation for this command is that the ability to store many small integers as a single large bitmap (or segmented over a few keys to avoid having huge keys) is extremely memory efficient, and opens new use cases for Redis to be applied, especially in the field of real time analytics. This use cases are supported by the ability to specify the overflow in a controlled way. 
-Fun fact: Reddit's 2017 April fool's project [r/place](https://reddit.com/r/place) was [built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) in order to take an in-memory representation of the collaborative canvas. +Fun fact: Reddit's 2017 April fools' project [r/place](https://reddit.com/r/place) was [built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) in order to take an in-memory representation of the collaborative canvas. ## Performance considerations diff --git a/wordlist b/wordlist index 825b8b51c6..f3f63ac075 100644 --- a/wordlist +++ b/wordlist @@ -96,7 +96,7 @@ RDDs REDISPORT REdis REmote -Reddit +Reddit's RSS RTT Redis From c2ac484105ece1072c66be47ed9063f6ae7d0a67 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 20 Apr 2017 09:03:45 +0200 Subject: [PATCH 0723/2314] Some LFU documentation. --- topics/lru-cache.md | 62 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/topics/lru-cache.md b/topics/lru-cache.md index 6a9e487a9d..19c093de4b 100644 --- a/topics/lru-cache.md +++ b/topics/lru-cache.md @@ -1,7 +1,7 @@ Using Redis as an LRU cache === -When Redis is used as a cache, sometimes it is handy to let it automatically +When Redis is used as a cache, often it is handy to let it automatically evict old data as you add new one. This behavior is very well known in the community of developers, since it is the default behavior of the popular *memcached* system. @@ -12,6 +12,9 @@ order to limit the memory usage to a fixed amount, and it also covers in depth the LRU algorithm used by Redis, that is actually an approximation of the exact LRU. +Starting with Redis version 4.0, a new LFU (Least Frequently Used) eviction +policy was introduced. This is covered in a separated section of this documentation. + Maxmemory configuration directive --- @@ -129,3 +132,60 @@ difference in your cache misses rate. 
To experiment in production with different values for the sample size by using the `CONFIG SET maxmemory-samples ` command, is very simple. +The new LFU mode +--- + +Starting with Redis 4.0, a new [Least Frequently Used eviction mode](http://antirez.com/news/109) is available. This mode may work better (provide a better +hits/misses ratio) in certain cases, since using LFU Redis will try to track +the frequency of access of items, so that the ones used rarely are evicted while +the one used often have an higher chance of remaining in memory. + +If you think at LRU, an item that was recently accessed but is actually almost never requested, will not get expired, so the risk is to evict a key that has an higher chance to be requested in the future. LFU does not have this problem, and in general should adapt better to different access patterns. + +To configure the LFU mode, the following policies are available: + +* `volatile-lfu` Evict using approximated LFU among the keys with an expire set. +* `allkeys-lfu` Evict any key using approximated LFU. + +LFU is approximated like LRU: it uses a probabilistic counter, called a [Morris counter](https://en.wikipedia.org/wiki/Approximate_counting_algorithm) in order to estimate the object access frequency using just a few bits per object, combined with a decay period so that the counter is reduced over time: at some point we no longer want to consider keys as frequently accessed, even if they were in the past, so that the algorithm can adapt to a shift in the access pattern. + +Those informations are sampled similarly to what happens for LRU (as explained in the previous section of this documentation) in order to select a candidate for eviction. + +However unlike LRU, LFU has certain tunable parameters: for instance, how fast +should a frequent item lower in rank if it gets no longer accessed? It is also possible to tune the Morris counters range in order to better adapt the algorithm to specific use cases. 
+ +By default Redis 4.0 is configured to: + +* Saturate the counter at, around, one million requests. +* Decay the counter every one minute. + +Those should be reasonable values and were tested experimental, but the user may want to play with these configuration settings in order to pick optimal values. + +Instructions about how to tune these parameters can be found inside the example `redis.conf` file in the source distribution, but briefly, they are: + +``` +lfu-log-factor 10 +lfu-decay-time 1 +``` + +The decay time is the obvious one, it is the amount of minutes a counter should be decayed, when sampled and found to be older than that value. A special value of `0` means: always decay the counter every time is scanned, and is rarely useful. + +The counter *logarithm factor* changes how many hits are needed in order to saturate the frequency counter, which is just in the range 0-255. The higher the factor, the more accesses are needed in order to reach the maximum. The lower the factor, the better is the resolution of the counter for low accesses, according to the following table: + +``` ++--------+------------+------------+------------+------------+------------+ +| factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | ++--------+------------+------------+------------+------------+------------+ +| 0 | 104 | 255 | 255 | 255 | 255 | ++--------+------------+------------+------------+------------+------------+ +| 1 | 18 | 49 | 255 | 255 | 255 | ++--------+------------+------------+------------+------------+------------+ +| 10 | 10 | 18 | 142 | 255 | 255 | ++--------+------------+------------+------------+------------+------------+ +| 100 | 8 | 11 | 49 | 143 | 255 | ++--------+------------+------------+------------+------------+------------+ +``` + +So basically the factor is a trade off between better distinguishing items with low accesses VS distinguishing items with high accesses. 
More informations are available in the example `redis.conf` file self documenting comments. + +Since LFU is a new feature, we'll appreciate any feedback about how it performs in your use case compared to LRU. From 3ecad2d131ff31b86c091241d74c63b47eef1c09 Mon Sep 17 00:00:00 2001 From: randvis Date: Thu, 20 Apr 2017 20:57:20 +0800 Subject: [PATCH 0724/2314] Remove incorrect sentinel symbol in "Example 3: Sentinel in the client boxes" (#686) --- topics/sentinel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 890a4e22c0..f8c98629ad 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -311,7 +311,7 @@ where clients are: +----+ +----+ | M1 |----+----| R1 | - | S1 | | | S2 | + | | | | | +----+ | +----+ | +------------+------------+ From 1aa2882aecd31b410bc4cc0b09fbcf25700228ab Mon Sep 17 00:00:00 2001 From: Michael Grunder Date: Fri, 28 Apr 2017 00:15:18 -0700 Subject: [PATCH 0725/2314] Sort returns different replies depending on command arguments. (#823) --- commands/sort.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commands/sort.md b/commands/sort.md index aa4f39fde8..28e8bc681d 100644 --- a/commands/sort.md +++ b/commands/sort.md @@ -137,4 +137,5 @@ key is accessed to retrieve the specified hash field. @return -@array-reply: list of sorted elements. +@array-reply: without passing the `store` option the command returns a list of sorted elements. +@integer-reply: when the `store` option is specified the command returns the number of sorted elements in the destination list. 
From 2d3ac9c5b42f102fde0d2bf1b78c205ab3a6730e Mon Sep 17 00:00:00 2001 From: Dennis Olvany Date: Wed, 3 May 2017 09:14:00 -0400 Subject: [PATCH 0726/2314] Update rediscli.md (#826) --- topics/rediscli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/rediscli.md b/topics/rediscli.md index 2bcabe2164..27ff39f6a5 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -391,7 +391,7 @@ are explained in the next sections: ## Continuous stats mode This is probably one of the lesser known features of `redis-cli`, and one -very useful in order to minor Redis instances in real time. +very useful in order to monitor Redis instances in real time. To enable this mode, the `--stat` option is used. The output is very clear about the behavior of the CLI in this mode: From e66217aa3897c0221dccbae8c22bca657972b82b Mon Sep 17 00:00:00 2001 From: Ben Arent Date: Fri, 5 May 2017 11:47:09 -0700 Subject: [PATCH 0727/2314] Correct URL --- topics/problems.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/problems.md b/topics/problems.md index 05e9183617..fdb4303686 100644 --- a/topics/problems.md +++ b/topics/problems.md @@ -24,5 +24,5 @@ releases that included important fixes. List of known Linux related bugs affecting Redis. === -* Ubuntu 10.04 and 10.10 have serious bugs (especially 10.10) that cause slow downs if not just instance hangs. Please move away from the default kernels shipped with this distributions. [Link to 10.04 bug](https://silverline.librato.com/blog/main/EC2_Users_Should_be_Cautious_When_Booting_Ubuntu_10_04_AMIs). [Link to 10.10 bug](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/666211). Both bugs were reported many times in the context of EC2 instances, but other users confirmed that also native servers are affected (at least by one of the two). +* Ubuntu 10.04 and 10.10 have serious bugs (especially 10.10) that cause slow downs if not just instance hangs. 
Please move away from the default kernels shipped with this distributions. [Link to 10.04 bug](https://blog.librato.com/posts/2011/5/16/ec2-users-should-be-cautious-when-booting-ubuntu-1004-amis). [Link to 10.10 bug](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/666211). Both bugs were reported many times in the context of EC2 instances, but other users confirmed that also native servers are affected (at least by one of the two). * Certain versions of the Xen hypervisor are known to have very bad fork() performances. See [the latency page](/topics/latency) for more information. From 19d4e96168127bcd52a66c1135733323b33e1622 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 12 May 2017 19:19:27 +0200 Subject: [PATCH 0728/2314] Modules json file added. --- modules.json | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 modules.json diff --git a/modules.json b/modules.json new file mode 100644 index 0000000000..41d38632b9 --- /dev/null +++ b/modules.json @@ -0,0 +1,19 @@ +[ + { + "name": "redis-roaring", + "license" : "MIT", + "repository": "https://github.com/aviggiano/redis-roaring", + "description": "Uses the CRoaring library to implement roaring bitmap commands for Redis.", + "authors": ["aviggiano"], + "stars": 9 + }, + + { + "name": "neural-redis", + "license" : "BSD", + "repository": "https://github.com/antirez/neural-redis", + "description": "Online trainable neural networks as Redis data types.", + "authors": ["antirez"], + "stars": 1854 + } +] From b9ece3a04bc0b226d618af7a0b7f793335ff80e2 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 12 May 2017 21:11:15 +0300 Subject: [PATCH 0729/2314] Adds some Redis Labs modules w --- modules.json | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/modules.json b/modules.json index 41d38632b9..c8f0c1ee80 100644 --- a/modules.json +++ b/modules.json @@ -8,6 +8,33 @@ "stars": 9 }, + { + "name": "ReJSON", + "license" : "AGPL", + 
"repository": "https://github.com/RedisLabsModules/ReJSON", + "description": "A JSON data type for Redis", + "authors": ["itamarhaber", "RedisLabs"], + "stars": 271 + }, + + { + "name": "Redis-ML", + "license" : "AGPL", + "repository": "https://github.com/RedisLabsModules/redis-ml", + "description": "Machine Learning Model Server", + "authors": ["shaynativ", "RedisLabs"], + "stars": 49 + }, + + { + "name": "RediSearch", + "license" : "AGPL", + "repository": "https://github.com/RedisLabsModules/RediSearch", + "description": "Full-Text search over Redis", + "authors": ["dvirsky", "RedisLabs"], + "stars": 247 + }, + { "name": "neural-redis", "license" : "BSD", From 2fb01d798ded31928aee42372719e3d0c63d0045 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Fri, 12 May 2017 21:29:04 +0300 Subject: [PATCH 0730/2314] And a few more --- modules.json | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/modules.json b/modules.json index c8f0c1ee80..db797db9e3 100644 --- a/modules.json +++ b/modules.json @@ -8,6 +8,33 @@ "stars": 9 }, + { + "name": "redis-cell", + "license" : "MIT", + "repository": "https://github.com/brandur/redis-cell", + "description": "A Redis module that provides rate limiting in Redis as a single command.", + "authors": ["brandur"], + "stars": 164 + }, + + { + "name": "Redis Graph", + "license" : "AGPL", + "repository": "https://github.com/swilly22/redis-module-graph", + "description": "A graph database with a Cypher-based querying language", + "authors": ["swilly22"], + "stars": 120 + }, + + { + "name": "redis-tdigest", + "license" : "MIT", + "repository": "https://github.com/usmanm/redis-tdigest", + "description": "t-digest data structure wich can be used for accurate online accumulation of rank-based statistics such as quantiles and cumulative distribution at a point.", + "authors": ["usmanm"], + "stars": 29 + }, + { 
"name": "ReJSON", "license" : "AGPL", @@ -35,6 +62,33 @@ "stars": 247 }, + { + "name": "topk", + "license" : "AGPL", + "repository": "https://github.com/RedisLabsModules/topk", + "description": "An almost deterministic top k elements counter", + "authors": ["itamarhaber", "RedisLabs"], + "stars": 15 + }, + + { + "name": "countminsketch", + "license" : "AGPL", + "repository": "https://github.com/RedisLabsModules/countminsketch", + "description": "An apporximate frequency counter", + "authors": ["itamarhaber", "RedisLabs"], + "stars": 15 + }, + + { + "name": "redablooms", + "license" : "AGPL", + "repository": "https://github.com/RedisLabsModules/redablooms", + "description": "Scalable, counting Bloom filters", + "authors": ["itamarhaber", "RedisLabs"], + "stars": 15 + }, + { "name": "neural-redis", "license" : "BSD", From 428de7699297f60d9b9a4b1ecb2a1e7034cff4f9 Mon Sep 17 00:00:00 2001 From: dschow Date: Tue, 16 May 2017 01:04:28 -0700 Subject: [PATCH 0731/2314] Pattern: Rate limiter 2 incorrectly expires the value instead of the key (#832) --- commands/incr.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/incr.md b/commands/incr.md index 17a127b48d..48431f9a76 100644 --- a/commands/incr.md +++ b/commands/incr.md @@ -103,7 +103,7 @@ IF current != NULL AND current > 10 THEN ELSE value = INCR(ip) IF value == 1 THEN - EXPIRE(value,1) + EXPIRE(ip,1) END PERFORM_API_CALL() END From 94333eb1efe2bedfbfdc3413d5b189776485bcdd Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Wed, 17 May 2017 11:39:54 +0300 Subject: [PATCH 0732/2314] Adds @badboy to the hiredis team (#822) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 6a5d578a22..a75e023336 100644 --- a/clients.json +++ b/clients.json @@ -669,7 +669,7 @@ "language": "C", "repository": "https://github.com/redis/hiredis", "description": "This is the official C client. 
Support for the whole command set, pipelining, event driven programming.", - "authors": ["antirez","pnoordhuis"], + "authors": ["antirez","pnoordhuis","badboy_"], "recommended": true, "active": true }, From 4c77f08a4c619a19ddf4e32e20862361ca8670fb Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 15 Jun 2017 17:01:54 +0200 Subject: [PATCH 0733/2314] SLOWLOG doc updated after 4.0 changes. --- commands/slowlog.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/commands/slowlog.md b/commands/slowlog.md index 0cf113f34a..7872c7a146 100644 --- a/commands/slowlog.md +++ b/commands/slowlog.md @@ -56,12 +56,21 @@ redis 127.0.0.1:6379> slowlog get 2 3) "100" ``` -Every entry is composed of four fields: +There are also optional fields emitted only by Redis 4.0 or greater: + +``` +5) "127.0.0.1:58217" +6) "worker-123" +``` + +Every entry is composed of four (or six starting with Redis 4.0) fields: * A unique progressive identifier for every slow log entry. * The unix timestamp at which the logged command was processed. * The amount of time needed for its execution, in microseconds. * The array composing the arguments of the command. +* Client IP address and port (4.0 only). +* Client name if set via the `CLIENT SETNAME` command (4.0 only). The entry's unique ID can be used in order to avoid processing slow log entries multiple times (for instance you may have a script sending you an email alert From deeb994d34ebeabc5cb61ee42ef0ae61be114794 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 11:37:55 +0200 Subject: [PATCH 0734/2314] Update CLIENT PAUSE doc according to codebase change. 
--- commands/client-pause.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/commands/client-pause.md b/commands/client-pause.md index 07a5867d46..957163fff9 100644 --- a/commands/client-pause.md +++ b/commands/client-pause.md @@ -15,6 +15,10 @@ This command is useful as it makes able to switch clients from a Redis instance It is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the `INFO replication` command in order to get the current master offset at the time the clients are blocked. This way it is possible to wait for a specific offset in the slave side in order to make sure all the replication stream was processed. +Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or +expired during the time clients are paused. This way the dataset is guaranteed +to be static not just from the point of view of clients not being able to write, but also from the point of view of internal operations. + @return @simple-string-reply: The command returns OK or an error if the timeout is invalid. From 9234f5a64821868ae72467205148c19ace6ca514 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 17:07:07 +0200 Subject: [PATCH 0735/2314] SELECT page improved. --- commands/select.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/commands/select.md b/commands/select.md index 344d3115aa..4bfca9ca3f 100644 --- a/commands/select.md +++ b/commands/select.md @@ -1,6 +1,12 @@ Select the DB with having the specified zero-based numeric index. New connections always use DB 0. +Redis different selectable databases are a form of namespacing: all the databases are anyway persisted togeter in the same RDB / AOF file. However different DBs can have keys having the same name, and there are commands available like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` that work on specific databases. + +When using Redis Cluster, the `SELECT` command cannot be used, since Redis Cluster only supports database zero. 
In the case of Redis Cluster, having multiple databases would be useless, and a worthless source of complexity, because anyway commands operating atomically on a single database would not be possible with the Redis Cluster design and goals. + +Since the currently selected database is a property of the connection, clients should track the currently selected database and re-select it on reconnection. While there is no command in order to query the selected database in the current connection, the `CLIENT LIST` output shows, for each client, the currently selected database. + @return @simple-string-reply From a143d41d20d39838d02404f733e78b49dc80254c Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 17:09:12 +0200 Subject: [PATCH 0736/2314] Fix first sentence in the SELECT command page. --- commands/select.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/select.md b/commands/select.md index 4bfca9ca3f..4b74e996bb 100644 --- a/commands/select.md +++ b/commands/select.md @@ -1,5 +1,5 @@ -Select the DB with having the specified zero-based numeric index. -New connections always use DB 0. +Select the Redis logical database having the specified zero-based numeric index. +New connections always use the database 0. Redis different selectable databases are a form of namespacing: all the databases are anyway persisted togeter in the same RDB / AOF file. However different DBs can have keys having the same name, and there are commands available like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` that work on specific databases. From ce9dd1b4eee52e09adf319e4baa56c755678ed92 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 17:21:09 +0200 Subject: [PATCH 0737/2314] Fix typos from SELECT man page. 
--- commands/select.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/select.md b/commands/select.md index 4b74e996bb..fed0b2d8ec 100644 --- a/commands/select.md +++ b/commands/select.md @@ -1,7 +1,7 @@ Select the Redis logical database having the specified zero-based numeric index. New connections always use the database 0. -Redis different selectable databases are a form of namespacing: all the databases are anyway persisted togeter in the same RDB / AOF file. However different DBs can have keys having the same name, and there are commands available like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` that work on specific databases. +Redis different selectable databases are a form of namespacing: all the databases are anyway persisted together in the same RDB / AOF file. However different databases can have keys having the same name, and there are commands available like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` that work on specific databases. When using Redis Cluster, the `SELECT` command cannot be used, since Redis Cluster only supports database zero. In the case of Redis Cluster, having multiple databases would be useless, and a worthless source of complexity, because anyway commands operating atomically on a single database would not be possible with the Redis Cluster design and goals. From 55ae827a8e3a8885e5430bc8aa0f0c3c0f015dc9 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 17:28:58 +0200 Subject: [PATCH 0738/2314] Improve SELECT page a bit more. --- commands/select.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/select.md b/commands/select.md index fed0b2d8ec..d8efd0a253 100644 --- a/commands/select.md +++ b/commands/select.md @@ -3,6 +3,8 @@ New connections always use the database 0. Redis different selectable databases are a form of namespacing: all the databases are anyway persisted together in the same RDB / AOF file. 
However different databases can have keys having the same name, and there are commands available like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` that work on specific databases. +In practical terms, Redis databases should mainly used in order to, if needed, separate different keys belonging to the same application, and not in order to use a single Redis instance for multiple unrelated applications. + When using Redis Cluster, the `SELECT` command cannot be used, since Redis Cluster only supports database zero. In the case of Redis Cluster, having multiple databases would be useless, and a worthless source of complexity, because anyway commands operating atomically on a single database would not be possible with the Redis Cluster design and goals. Since the currently selected database is a property of the connection, clients should track the currently selected database and re-select it on reconnection. While there is no command in order to query the selected database in the current connection, the `CLIENT LIST` output shows, for each client, the currently selected database. From 3ce015851aed508aa80634515efffe3f3584fe5e Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 17:36:17 +0200 Subject: [PATCH 0739/2314] Nazy spell checker that does not allow namespacing hopefully disabled. Inability to update doc without changing a dictionary is not ideal... Just a warning would be great. --- makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefile b/makefile index 01f3251b4f..97646e5f04 100644 --- a/makefile +++ b/makefile @@ -3,7 +3,7 @@ JSON_FILES:=$(shell find . 
-name '*.json') TEXT_FILES:=$(patsubst %.md,tmp/%.txt,$(MD_FILES)) SPELL_FILES:=$(patsubst %.txt,%.spell,$(TEXT_FILES)) -default: parse spell +default: parse parse: $(JSON_FILES) rake parse From 19a1b8777c126b08562da62c895c9b931dbeecb2 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 23 Jun 2017 18:32:44 +0200 Subject: [PATCH 0740/2314] Admin page: add more info about upgrading instances. --- topics/admin.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/topics/admin.md b/topics/admin.md index 0fd8cfd4b6..ec9a0133e7 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -24,7 +24,7 @@ Running Redis on EC2 + Use HVM based instances, not PV based instances. + Don't use old instances families, for example: use m3.medium with HVM instead of m1.medium with PV. + The use of Redis persistence with **EC2 EBS volumes** needs to be handled with care since sometimes EBS volumes have high latency characteristics. -+ You may want to try the new **diskless replication** (currently experimental) if you have issues when slaves are synchronizing with the master. ++ You may want to try the new **diskless replication** if you have issues when slaves are synchronizing with the master. Upgrading or restarting a Redis instance without downtime ------------------------------------------------------- @@ -45,3 +45,7 @@ The following steps provide a very commonly used way in order to avoid any downt * Allow writes to the slave using **CONFIG SET slave-read-only no** * Configure all your clients in order to use the new instance (that is, the slave). * Once you are sure that the master is no longer receiving any query (you can check this with the [MONITOR command](/commands/monitor)), elect the slave to master using the **SLAVEOF NO ONE** command, and shut down your master. 
+ +If you are using [Redis Sentinel](/topics/) or [Redis Cluster](/topics//topics/cluster-tutorial), the simplest way in order to upgrade to newer versions, is to upgrade a slave after the other, then perform a manual fail-over in order to promote one of the upgraded slaves as master, and finally promote the last slave. + +Note however that Redis Cluster 4.0 is not compatible with Redis Cluster 3.2 at cluster bus protocol level, so a mass restart is needed in this case. From 13902d7bbd741b31c5c4dbc50d49d444a1c66b0c Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 26 Jun 2017 09:33:17 +0200 Subject: [PATCH 0741/2314] ARM and benchmarks pages improved. --- topics/ARM.md | 4 ++-- topics/benchmarks.md | 43 ++++++++++++++++++------------------------- 2 files changed, 20 insertions(+), 27 deletions(-) diff --git a/topics/ARM.md b/topics/ARM.md index f2805da34d..67a5b62fec 100644 --- a/topics/ARM.md +++ b/topics/ARM.md @@ -1,8 +1,8 @@ # Redis on ARM Since the Redis 4.0 version (currently in release candidate state) Redis -supports the ARM processor, and the Raspberry Pi, as a main -platform, exactly like it happens for Linux/x86. It means that every new +supports the ARM processor in general, and the Raspberry Pi specifically, as a +main platform, exactly like it happens for Linux/x86. It means that every new release of Redis is tested on the Pi environment, and that we take this documentation page updated with information about supported devices and information. While Redis already runs on Android, in the future we look diff --git a/topics/benchmarks.md b/topics/benchmarks.md index 465644c63a..39db64b0bb 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -123,32 +123,18 @@ different options. If you plan to compare Redis to something else, then it is important to evaluate the functional and technical differences, and take them in account. -+ Redis is a server: all commands involve network or IPC round trips. 
It is -meaningless to compare it to embedded data stores such as SQLite, Berkeley DB, -Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is -primarily in network/protocol management. -+ Redis commands return an acknowledgment for all usual commands. Some other -data stores do not (for instance MongoDB does not implicitly acknowledge write -operations). Comparing Redis to stores involving one-way queries is only -mildly useful. -+ Naively iterating on synchronous Redis commands does not benchmark Redis -itself, but rather measure your network (or IPC) latency. To really test Redis, -you need multiple connections (like redis-benchmark) and/or to use pipelining -to aggregate several commands and/or multiple threads or processes. -+ Redis is an in-memory data store with some optional persistence options. If -you plan to compare it to transactional servers (MySQL, PostgreSQL, etc ...), -then you should consider activating AOF and decide on a suitable fsync policy. -+ Redis is a single-threaded server. It is not designed to benefit from -multiple CPU cores. People are supposed to launch several Redis instances to -scale out on several cores if needed. It is not really fair to compare one -single Redis instance to a multi-threaded data store. ++ Redis is a server: all commands involve network or IPC round trips. It is meaningless to compare it to embedded data stores such as SQLite, Berkeley DB, Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is primarily in network/protocol management. ++ Redis commands return an acknowledgment for all usual commands. Some other data stores do not. Comparing Redis to stores involving one-way queries is only mildly useful. ++ Naively iterating on synchronous Redis commands does not benchmark Redis itself, but rather measure your network (or IPC) latency and the client library intrinsic latency. 
To really test Redis, you need multiple connections (like redis-benchmark) and/or to use pipelining to aggregate several commands and/or multiple threads or processes. ++ Redis is an in-memory data store with some optional persistence options. If you plan to compare it to transactional servers (MySQL, PostgreSQL, etc ...), then you should consider activating AOF and decide on a suitable fsync policy. ++ Redis is, mostly, a single-threaded server from the POV of commands execution (actually modern versions of Redis use threads for different things). It is not designed to benefit from multiple CPU cores. People are supposed to launch several Redis instances to scale out on several cores if needed. It is not really fair to compare one single Redis instance to a multi-threaded data store. A common misconception is that redis-benchmark is designed to make Redis performances look stellar, the throughput achieved by redis-benchmark being somewhat artificial, and not achievable by a real application. This is -actually plain wrong. +actually not true. -The redis-benchmark program is a quick and useful way to get some figures and +The `redis-benchmark` program is a quick and useful way to get some figures and evaluate the performance of a Redis instance on a given hardware. However, by default, it does not represent the maximum throughput a Redis instance can sustain. Actually, by using pipelining and a fast client (hiredis), it is fairly @@ -156,12 +142,17 @@ easy to write a program generating more throughput than redis-benchmark. The default behavior of redis-benchmark is to achieve throughput by exploiting concurrency only (i.e. it creates several connections to the server). It does not use pipelining or any parallelism at all (one pending query per -connection at most, and no multi-threading). +connection at most, and no multi-threading), if not explicitly enabled via +the `-P` parameter. 
So in some way using `redis-benchmark` and, triggering, for +example, a `BGSAVE` operation in the background at the same time, will provide +the user with numbers more near to the *worst case* than to the best case. To run a benchmark using pipelining mode (and achieve higher throughput), you need to explicitly use the -P option. Please note that it is still a realistic behavior since a lot of Redis based applications actively use -pipelining to improve performance. +pipelining to improve performance. However you should use a pipeline size that +is more or less the average pipeline length you'll be able to use in your +application in order to get realistic numbers. Finally, the benchmark should apply the same operations, and work in the same way with the multiple data stores you want to compare. It is absolutely pointless to @@ -279,7 +270,7 @@ Jumbo frames may also provide a performance boost when large objects are used. allocators (libc malloc, jemalloc, tcmalloc), which may have different behaviors in term of raw speed, internal and external fragmentation. If you did not compile Redis yourself, you can use the INFO command to check -the mem_allocator field. Please note most benchmarks do not run long enough to +the `mem_allocator` field. Please note most benchmarks do not run long enough to generate significant external fragmentation (contrary to production Redis instances). @@ -300,7 +291,7 @@ reproducible results, it is better to set the highest possible fixed frequency for all the CPU cores involved in the benchmark. + An important point is to size the system accordingly to the benchmark. The system must have enough RAM and must not swap. On Linux, do not forget -to set the overcommit_memory parameter correctly. Please note 32 and 64 bit +to set the `overcommit_memory` parameter correctly. Please note 32 and 64 bit Redis instances do not have the same memory footprint. 
+ If you plan to use RDB or AOF for your benchmark, please check there is no other I/O activity in the system. Avoid putting RDB or AOF files on NAS or NFS shares, @@ -314,6 +305,8 @@ but MONITOR will impact the measured performance significantly. # Benchmark results on different virtualized and bare-metal servers. +WARNING: Note that most of the following benchmarks are a few years old and are obtained using old hardware compared to today's standards. This page should be updated, but in many cases you can expect twice the numbers you are seeing here using state of hard hardware. Moreover Redis 4.0 is faster than 2.6 in many workloads. + * The test was done with 50 simultaneous clients performing 2 million requests. * Redis 2.6.14 is used for all the tests. * Test was executed using the loopback interface. From a4aa24003a94c063c4aea80b568fe53b61417ce4 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 26 Jun 2017 10:56:00 +0200 Subject: [PATCH 0742/2314] Client handling: page improved with TCP keepalive info. --- topics/clients.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/topics/clients.md b/topics/clients.md index 00fbdfb823..62daf8d488 100644 --- a/topics/clients.md +++ b/topics/clients.md @@ -161,4 +161,11 @@ See the [CLIENT LIST](http://redis.io/commands/client-list) documentation for th Once you have the list of clients, you can easily close the connection with a client using the `CLIENT KILL` command specifying the client address as argument. -The commands `CLIENT SETNAME` and `CLIENT GETNAME` can be used to set and get the connection name. +The commands `CLIENT SETNAME` and `CLIENT GETNAME` can be used to set and get the connection name. Starting with Redis 4.0, the client name is shown in the +`SLOWLOG` output, so that it gets simpler to identify clients that are creating +latency issues. 
+ +TCP keepalive +--- + +Recent versions of Redis (3.2 or greater) have TCP keepalive (`SO_KEEPALIVE` socket option) enabled by default and set to about 300 seconds. This option is useful in order to detect dead peers (clients that cannot be reached even if they look connected). Moreover, if there is network equipment between clients and servers that need to see some traffic in order to take the connection open, the option will prevent unexpected connection closed events. From cb65db5c7febacf9cacca0e37037518b8576905e Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 26 Jun 2017 14:47:07 +0200 Subject: [PATCH 0743/2314] Pipeline page improved. --- topics/pipelining.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/topics/pipelining.md b/topics/pipelining.md index bbb5167284..36f1f774be 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -56,7 +56,28 @@ To be very explicit, with pipelining the order of operations of our very first e **IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send a lot of commands with pipelining, it is better to send them as batches having a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at max the amount needed to queue the replies for this 10k commands. -Some benchmark +It's not just a matter of RTT +--- + +Pipelining is not just a way in order to reduce the latency cost due to the +round trip time, it actually improves by a huge amount the total operations +you can perform per second in a given Redis server. 
This is the result of the +fact that, without using pipelining, serving each command is very cheap from +the point of view of accessing the data structures and producing the reply, +but it is very costly from the point of view of doing the socket I/O. This +involes calling the `read()` and `write()` syscall, that means going from user +land to kernel land. The context switch is a huge speed penalty. + +When pipelining is used, many commands are usually read with a single `read()` +system call, and multiple replies are delivered with a single `write()` system +call. Because of this, the number of total queries performed per second +initially increases almost linearly with longer pipelines, and eventually +reaches 10 times the baseline obtained not using pipelining, as you can +see from the following graph: + +![Pipeline size and IOPs](http://redis.io/images/redisdoc/pipeline_iops.png) + +Some real world code example --- In the following benchmark we'll use the Redis Ruby client, supporting pipelining, to test the speed improvement due to pipelining: From 24d3ea7a3762ad743223a4b63a540dd31f3ba2db Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 26 Jun 2017 15:03:08 +0200 Subject: [PATCH 0744/2314] Pipelining page: add appendix on loopback interface latency. --- topics/pipelining.md | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/topics/pipelining.md b/topics/pipelining.md index 36f1f774be..a05db1fc81 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -1,6 +1,9 @@ -Request/Response protocols and RTT +Using pipelining to speedup Redis queries === +Request/Response protocols and RTT +--- + Redis is a TCP server using the client-server model and what is called a *Request/Response* protocol. 
This means that usually a request is accomplished with the following steps: @@ -127,3 +130,32 @@ Pipelining VS Scripting Using [Redis scripting](/commands/eval) (available in Redis version 2.6 or greater) a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). Sometimes the application may also want to send `EVAL` or `EVALSHA` commands in a pipeline. This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](http://redis.io/commands/script-load) command (it guarantees that `EVALSHA` can be called without the risk of failing). + +Appendix: why a busy loops are slow even on the loopback interface? +--- + +Even with all the background covered in this page, you may still wonder why +a Redis benchmark like the following (in pseudo code), is slow even when +executed in the loopback interface, when the server and the client are running +in the same physical machine: + + FOR-ONE-SECOND: + Redis.SET("foo","bar") + END + +After all if both the Redis process and the benchmark are running in the same +box, isn't this just messages copied via memory from one place to another without +any actual latency and actual networking involved? + +The reason is that processes in a system are not always running, actually it is +the kernel scheduler that let the process run, so what happens is that, for +instance, the benchmark is allowed to run, reads the reply from the Redis server +(related to the last command executed), and writes a new command. 
The command is +now in the loopback interface buffer, but in order to be read by the server, the +kernel should schedule the server process (currently blocked in a system call) +to run, and so forth. So in practical terms the loopback interface still involves +network-alike latency, because of how the kernel scheduler works. + +Basically a busy loop benchmark is the silliest thing that can be done when +metering performances in a networked server. The wise thing is just avoiding +benchmarking in this way. From 0e75d746a7d734c2c0ea473bc762659e29677141 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 26 Jun 2017 15:36:47 +0200 Subject: [PATCH 0745/2314] FAQ page improved. --- topics/faq.md | 50 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/topics/faq.md b/topics/faq.md index 97a6cd6e76..1b2be3c803 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -7,20 +7,23 @@ There are two main reasons. * Redis is a different evolution path in the key-value DBs where values can contain more complex data types, with atomic operations defined on those data types. Redis data types are closely related to fundamental data structures and are exposed to the programmer as such, without additional abstraction layers. * Redis is an in-memory but persistent on disk database, so it represents a different trade off where very high write and read speed is achieved with the limitation of data sets that can't be larger than memory. Another advantage of in memory databases is that the memory representation of complex data structures -is much simpler to manipulate compared to the same data structure on disk, so +is much simpler to manipulate compared to the same data structures on disk, so Redis can do a lot, with little internal complexity. 
At the same time the two on-disk storage formats (RDB and AOF) don't need to be suitable for random access, so they are compact and always generated in an append-only fashion (Even the AOF log rotation is an append-only operation, since the new version -is generated from the copy of data in memory). +is generated from the copy of data in memory). However this design also involves +different challenges compared to traditional on-disk stores. Being the main data +representation on memory, Redis operations must be carefully handled to make sure +there is always an updated version of the data set on disk. ## What's the Redis memory footprint? To give you a few examples (all obtained using 64-bit instances): -* An empty instance uses ~ 1MB of memory. -* 1 Million small Keys -> String Value pairs use ~ 100MB of memory. -* 1 Million Keys -> Hash value, representing an object with 5 fields, use ~ 200 MB of memory. +* An empty instance uses ~ 3MB of memory. +* 1 Million small Keys -> String Value pairs use ~ 85MB of memory. +* 1 Million Keys -> Hash value, representing an object with 5 fields, use ~ 160 MB of memory. To test your use case is trivial using the `redis-benchmark` utility to generate random data sets and check with the `INFO memory` command the space used. @@ -36,12 +39,22 @@ If your real problem is not the total RAM needed, but the fact that you need to split your data set into multiple Redis instances, please read the [Partitioning page](/topics/partitioning) in this documentation for more info. +Recently Redis Labs, the company sponsoring Redis developments, developed a +"Redis on flash" solution that is able to use a mixed RAM/flash approach for +larger data sets with a biased access pattern. You may check their offering +for more information, however this feature is not part of the open source Redis +code base. + ## Is using Redis together with an on-disk database a good idea? 
Yes, a common design pattern involves taking very write-heavy small data in Redis (and data you need the Redis data structures to model your problem in an efficient way), and big *blobs* of data into an SQL or eventually -consistent on-disk database. +consistent on-disk database. Similarly sometimes Redis is used in order to +take in memory another copy of a subset of the same data stored in the on-disk +database. This may look similar to caching, but actually is a more advanced model +since normally the Redis dataset is updated together with the on-disk DB dataset, +and not refreshed on cache misses. ## Is there something I can do to lower the Redis memory usage? @@ -55,20 +68,22 @@ way. There is more info in the [Memory Optimization page](/topics/memory-optimiz Redis will either be killed by the Linux kernel OOM killer, crash with an error, or will start to slow down. With modern operating systems malloc() returning NULL is not common, usually -the server will start swapping, and Redis performance will degrade, so -you'll probably notice there is something wrong. - -The INFO command will report the amount of memory Redis is using so you can -write scripts that monitor your Redis servers checking for critical conditions. +the server will start swapping (if some swap space is configured), and Redis +performance will start to degrade, so you'll probably notice there is something +wrong. Redis has built-in protections allowing the user to set a max limit to memory -usage, using the `maxmemory` option in the config file to put a limit +usage, using the `maxmemory` option in the configuration file to put a limit to the memory Redis can use. If this limit is reached Redis will start to reply with an error to write commands (but will continue to accept read-only commands), or you can configure it to evict keys when the max memory limit is reached in the case you are using Redis for caching. 
-We have documentation if you plan to use [Redis as an LRU cache](/topics/lru-cache). +We have detailed documentation in case you plan to use [Redis as an LRU cache](/topics/lru-cache). + +The INFO command will report the amount of memory Redis is using so you can +write scripts that monitor your Redis servers checking for critical conditions +before they are reached. ## Background saving fails with a fork() error under Linux even if I have a lot of free RAM! @@ -111,8 +126,8 @@ in RAM is also atomic from the point of view of the disk snapshot. ## Redis is single threaded. How can I exploit multiple CPU / cores? -It's very unlikely that CPU becomes your bottleneck with Redis, as usually Redis is either memory or network bound. For instance, using pipelining Redis running -on an average Linux system can deliver even 500k requests per second, so +It's not very frequent that CPU becomes your bottleneck with Redis, as usually Redis is either memory or network bound. For instance, using pipelining Redis running +on an average Linux system can deliver even 1 million requests per second, so if your application mainly uses O(N) or O(log(N)) commands, it is hardly going to use too much CPU. @@ -123,6 +138,11 @@ start thinking of some way to shard earlier. You can find more information about using multiple Redis instances in the [Partitioning page](/topics/partitioning). +However with Redis 4.0 we started to make Redis more threaded. For now this is +limited to deleting objects in the background, and to blocking commands +implemented via Redis modules. For the next releases, the plan is to make Redis +more and more threaded. + ## What is the maximum number of keys a single Redis instance can hold? and what the max number of elements in a Hash, List, Set, Sorted Set? 
Redis can handle up to 2^32 keys, and was tested in practice to From 09d7c614c8556a5128f55a565651b5cd7a40b0fe Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 26 Jun 2017 18:59:57 +0200 Subject: [PATCH 0746/2314] Replication doc updated. --- topics/replication.md | 157 ++++++++++++++++++++++++++---------------- 1 file changed, 96 insertions(+), 61 deletions(-) diff --git a/topics/replication.md b/topics/replication.md index 053ac4a1ff..83737c92bd 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -1,35 +1,54 @@ Replication === -Redis replication is a very simple to use and configure master-slave -replication that allows slave Redis servers to be exact copies of -master servers. The following are some very important facts about Redis -replication: +At the base of Redis replication there is a very simple to use and configure +master-slave replication that allows slave Redis servers to be exact copies of +master servers. The slave will automatically reconnect to the master every +time the link breaks, and will attempt to be an exact copy of it regardless +of what happens to the master. -* Redis uses asynchronous replication. Starting with Redis 2.8, -however, slaves periodically acknowledge the amount of data -processed from the replication stream. +This system works using three main mechanisms: + +1. When a master and a slave instance are well-connected, the master keeps the slave updated by sending a stream of commands in order to replicate the effects on the dataset happening in the master dataset: client writes, keys expiring or evicted, and so forth. +2. When the link between the master and the slave breaks, for network issues or because a timeout is sensed in the master or the slave, the slave reconnects and attempts to proceed with a partial resynchronization: it means that it will try to just obtain the part of the stream of commands it missed during the disconnection. +3. 
When a partial resynchronization is not possible, the slave will ask for a full resynchronization. This will involve a more complex process in which the master needs to create a snapshot of all its data, send it to the slave, and then continue sending the stream of commands as the dataset changes.
+
+Redis uses by default asynchronous replication, which being low latency and
+high performance, is the natural replication mode for the vast majority of Redis
+use cases. However Redis slaves asynchronously acknowledge the amount of data
+they received periodically with the master.
+
+Synchronous replication of certain data can be requested by the clients using
+the `WAIT` command. However `WAIT` is only able to ensure that there are the
+specified number of acknowledged copies in the other Redis instances: acknowledged
+writes can still be lost during a failover for different reasons, or depending
+on the exact configuration of the Redis persistence.
+You could check the Sentinel or Redis Cluster documentation for more information
+about high availability and failover. The rest of this document mainly describes
+the basic characteristics of Redis basic replication.
+
+The following are some very important facts about Redis replication:
+
+* Redis uses asynchronous replication, with asynchronous slave-to-master acknowledges of the amount of data processed.

* A master can have multiple slaves.

* Slaves are able to accept connections from other slaves. Aside from
connecting a number of slaves to the same master, slaves can also be
-connected to other slaves in a cascading-like structure.
+connected to other slaves in a cascading-like structure. Since Redis 4.0, all the sub-slaves will receive exactly the same replication stream from the master.

* Redis replication is non-blocking on the master side. This means that
the master will continue to handle queries when one or more slaves perform
-the initial synchronization.
+the initial synchronization or a partial resynchronization. -* Replication is also non-blocking on the slave side. While the slave is performing the initial synchronization, it can handle queries using the old version of -the dataset, assuming you configured Redis to do so in redis.conf. +* Replication is also largely non-blocking on the slave side. While the slave is performing the initial synchronization, it can handle queries using the old version of the dataset, assuming you configured Redis to do so in redis.conf. Otherwise, you can configure Redis slaves to return an error to clients if the replication stream is down. However, after the initial sync, the old dataset must be deleted and the new one must be loaded. The slave will block incoming -connections during this brief window (that can be as long as many seconds for very large datasets). +connections during this brief window (that can be as long as many seconds for very large datasets). Since Redis 4.0 it is possible to configure Redis so that the deletion of the old data set happens in a different thread, however loading the new initial dataset will still happen in the main thread and block the slave. * Replication can be used both for scalability, in order to have multiple slaves for read-only queries (for example, slow O(N) -operations can be offloaded to slaves), or simply for data redundancy. +operations can be offloaded to slaves), or simply for data safety. * It is possible to use replication to avoid the cost of having the master write the full dataset to disk: a typical technique involves configuring your master `redis.conf` to avoid persisting to disk at all, then connect a slave configured to save from time to time, or with AOF enabled. However this setup must be handled with care, since a restarting master will start with an empty dataset: if the slave tries to synchronized with it, the slave will be emptied as well. 
@@ -37,16 +56,17 @@ Safety of replication when master has persistence turned off --- In setups where Redis replication is used, it is strongly advised to have -persistence turned on in the master, or when this is not possible, for example -because of latency concerns, instances should be configured to **avoid restarting automatically** after a reboot. +persistence turned on in the master and in the slaves. When this is not possible, +for example because of latency concerns due to very slow disks, instances should +be configured to **avoid restarting automatically** after a reboot. To better understand why masters with persistence turned off configured to auto restart are dangerous, check the following failure mode where data is wiped from the master and all its slaves: 1. We have a setup with node A acting as master, with persistence turned down, and nodes B and C replicating from node A. -2. A crashes, however it has some auto-restart system, that restarts the process. However since persistence is turned off, the node restarts with an empty data set. -3. Nodes B and C will replicate from A, which is empty, so they'll effectively destroy their copy of the data. +2. Node A crashes, however it has some auto-restart system, that restarts the process. However since persistence is turned off, the node restarts with an empty data set. +3. Nodes B and C will replicate from node A, which is empty, so they'll effectively destroy their copy of the data. When Redis Sentinel is used for high availability, also turning off persistence on the master, together with auto restart of the process, is dangerous. For example the master can restart fast enough for Sentinel to don't detect a failure, so that the failure mode described above happens. @@ -56,48 +76,36 @@ Every time data safety is important, and replication is used with master configu How Redis replication works --- -If you set up a slave, upon connection it sends a PSYNC command. 
+Every Redis master has a replication ID: it is a large pseudo random string
+that marks a given story of the dataset. Each master also takes an offset that
+increments for every byte of replication stream that is produced to be
+sent to slaves, in order to update the state of the slaves with the new changes
+modifying the dataset. The replication offset is incremented even if no slave
+is actually connected, so basically every given pair of:

-If this is a reconnection and the master has enough *backlog*, only the difference (what the slave missed) is sent. Otherwise what is called a *full resynchronization* is triggered.

+    Replication ID, offset

-When a full resynchronization is triggered, the master starts a background
-saving process in order to produce an RDB file. At the same time it starts to
-buffer all new write commands received from the clients. When the background
-saving is complete, the master transfers the database file to the slave,
-which saves it on disk, and then loads it into memory. The master will
-then send all buffered commands to the slave. This is done as a
-stream of commands and is in the same format of the Redis protocol itself.
+Identifies an exact version of the dataset of a master.

-You can try it yourself via telnet. Connect to the Redis port while the
-server is doing some work and issue the `SYNC` command. You'll see a bulk
-transfer and then every command received by the master will be re-issued
-in the telnet session.
+When slaves connect to a master, they use the `PSYNC` command in order to send
+their old master replication ID and the offsets they processed so far. This way
+the master can send just the incremental part needed. However if there is not
+enough *backlog* in the master buffers, or if the slave is referring to a
+history (replication ID) which is no longer known, then a full resynchronization
+happens: in this case the slave will get a full copy of the dataset, from scratch.
-Slaves are able to automatically reconnect when the master-slave -link goes down for some reason. If the master receives multiple -concurrent slave synchronization requests, it performs a single -background save in order to serve all of them. - -Partial resynchronization ---- +This is how a full synchronization works in more details: -Starting with Redis 2.8, master and slave are usually able to continue the -replication process without requiring a full resynchronization after the -replication link went down. +The master starts a background saving process in order to produce an RDB file. At the same time it starts to buffer all new write commands received from the clients. When the background saving is complete, the master transfers the database file to the slave, which saves it on disk, and then loads it into memory. The master will then send all buffered commands to the slave. This is done as a stream of commands and is in the same format of the Redis protocol itself. -This works by creating an in-memory backlog of the replication stream on the -master side. The master and all the slaves agree on a *replication -offset* and a *master run ID*, so when the link goes down, the slave will -reconnect and ask the master to continue the replication. Assuming the -master run ID is still the same, and that the offset specified is available -in the replication backlog, replication will resume from the point where it left off. -If either of these conditions are unmet, a full resynchronization is performed -(which is the normal pre-2.8 behavior). As the run ID of the connected master is not persisted to disk, a full resynchronization is needed when the slave restarts. +You can try it yourself via telnet. Connect to the Redis port while the +server is doing some work and issue the `SYNC` command. You'll see a bulk +transfer and then every command received by the master will be re-issued +in the telnet session. 
Actually `SYNC` is an old protocol no longer used by +newer Redis instances, but is still there for backward compatibility: it does +not allow partial resynchronizations, so now `PSYNC` is used instead. -The new partial resynchronization feature uses the `PSYNC` command internally, -while the old implementation uses the `SYNC` command. Note that a Redis -slave is able to detect if the server it is talking with does not support -`PSYNC`, and will use `SYNC` instead. +As already said, slaves are able to automatically reconnect when the master-slave link goes down for some reason. If the master receives multiple concurrent slave synchronization requests, it performs a single background save in order to serve all of them. Diskless replication --- @@ -113,8 +121,7 @@ RDB over the wire to slaves, without using the disk as intermediate storage. Configuration --- -To configure replication is trivial: just add the following line to the slave -configuration file: +To configure basic Redis replication is trivial: just add the following line to the slave configuration file: slaveof 192.168.1.1 6379 @@ -141,17 +148,16 @@ This behavior is controlled by the `slave-read-only` option in the redis.conf fi Read-only slaves will reject all write commands, so that it is not possible to write to a slave because of a mistake. This does not mean that the feature is intended to expose a slave instance to the internet or more generally to a network where untrusted clients exist, because administrative commands like `DEBUG` or `CONFIG` are still enabled. However, security of read-only instances can be improved by disabling commands in redis.conf using the `rename-command` directive. You may wonder why it is possible to revert the read-only setting -and have slave instances that can be target of write operations. +and have slave instances that can be targeted by write operations. 
While those writes will be discarded if the slave and the master resynchronize or if the slave is restarted, there are a few legitimate use case for storing ephemeral data in writable slaves. -For example computing slow set or zset operations and storing them into local -keys is an use case for writable slaves that was observed multiple times. +For example computing slow Set or Sorted set operations and storing them into local keys is an use case for writable slaves that was observed multiple times. However note that **writable slaves before version 4.0 were uncapable of expiring keys with a time to live set**. This means that if you use `EXPIRE` or other commands that set a maximum TTL for a key, the key will leak, and while you may no longer see it while accessing it with read commands, you will see it in the count of keys and it will still use memory. So in general mixing writable slaves (previous version 4.0) and keys with TTL is going to create issues. -Redis 4.0 RC3 and greater totally resolve this problem and now writable +Redis 4.0 RC3 and greater versions totally solve this problem and now writable slaves are able to evict keys with TTL as masters do, with the exceptions of keys written in DB numbers greater than 63 (but by default Redis instances only have 16 databases). @@ -195,9 +201,7 @@ This is how the feature works: If there are at least N slaves, with a lag less than M seconds, then the write will be accepted. -You may think of it as a relaxed version of the "C" in the CAP theorem, where -consistency is not ensured for a given write, but at least the time window for -data loss is restricted to a given number of seconds. +You may think of it as a best effort data safety mechanism, where consistency is not ensured for a given write, but at least the time window for data loss is restricted to a given number of seconds. In general bound data loss is better than unbound one. 
If the conditions are not met, the master will instead reply with an error
and the write will not be accepted.
@@ -252,3 +256,34 @@ The two configurations directives to use are:
    slave-announce-port 1234

And are documented in the example `redis.conf` of recent Redis distributions.
+
+The INFO and ROLE command
+---
+
+There are two Redis commands that provide a lot of information on the current
+replication parameters of master and slave instances. One is `INFO`. If the
+command is called with the `replication` argument as `INFO replication` only
+information relevant to the replication are displayed. Another more
+computer-friendly command is `ROLE`, that provides the replication status of
+masters and slaves together with their replication offsets, list of connected
+slaves and so forth.
+
+Partial resynchronizations after restarts and failovers
+---
+
+Since Redis 4.0, when an instance is promoted to master after a failover,
+it will be still able to perform a partial resynchronization with the slaves
+of the old master. To do so, the slave remembers the old replication ID and
+offset of its former master, so can provide part of the backlog to the connecting
+slaves even if they ask for the old replication ID.
+
+However the new replication ID of the promoted slave will be different, since it
+constitutes a different history of the data set. For example, the master can
+return available and can continue accepting writes for some time, so using the
+same replication ID in the promoted slave would violate the rule that a
+replication ID and offset pair identifies only a single data set.
+
+Moreover slaves when powered off gently and restarted, are able to store in the
+`RDB` file the information needed in order to resynchronize with their master.
+This is useful in case of upgrades. When this is needed, it is better to use
+the `SHUTDOWN` command in order to perform a `save & quit` operation on the slave.
From 3f69e51b61a722fb672a8b7234f738a4167ffd86 Mon Sep 17 00:00:00 2001
From: antirez
Date: Fri, 30 Jun 2017 12:04:00 +0200
Subject: [PATCH 0747/2314] Document GEO _RO variants.

---
 commands/georadius.md         | 10 ++++++++++
 commands/georadiusbymember.md | 2 ++
 2 files changed, 12 insertions(+)

diff --git a/commands/georadius.md b/commands/georadius.md
index 00bbaa4508..3e05631546 100644
--- a/commands/georadius.md
+++ b/commands/georadius.md
@@ -1,5 +1,7 @@
Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius).
+This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` variants (see the section below for more information).
+
The common use case for this command is to retrieve geospatial items near a specified point and no far than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places.
The radius is specified in one of the following units:
@@ -39,6 +41,14 @@ So for example the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` wi
    ["Palermo","190.4424",["13.361389338970184","38.115556395496299"]]
+## Read only variants
+
+Since `GEORADIUS` and `GEORADIUSBYMEMBER` have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. For this reason read-only slaves will flag them, and Redis Cluster slaves will redirect them to the master instance even if the connection is in read only mode (See the `READONLY` command of Redis Cluster).
+
+Breaking the compatibility with the past was considered but rejected, at least for Redis 4.0, so instead two read only variants of the commands were added. They are exactly like the original commands but refuse the `STORE` and `STOREDIST` options.
The two variants are called `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`, and can safely be used in slaves. + +Both commands were introduced in Redis 3.2.10 and Redis 4.0.0 respectively. + @examples ```cli diff --git a/commands/georadiusbymember.md b/commands/georadiusbymember.md index 8502e3156c..ad10745b31 100644 --- a/commands/georadiusbymember.md +++ b/commands/georadiusbymember.md @@ -5,6 +5,8 @@ The position of the specified member is used as the center of the query. Please check the example below and the `GEORADIUS` documentation for more information about the command and its options. +Note that `GEORADIUSBYMEMBER_RO` is also available since Redis 3.2.10 and Redis 4.0.0 in order to provide a read-only command that can be used in slaves. See the `GEORADIUS` page for more information. + @examples ```cli From 612c6f947b9710d76d14beda14d9da777398cae1 Mon Sep 17 00:00:00 2001 From: Danny Guo Date: Tue, 4 Jul 2017 15:36:48 -0400 Subject: [PATCH 0748/2314] Fix typos for clients handling (#838) --- topics/clients.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/clients.md b/topics/clients.md index 62daf8d488..f621d0f653 100644 --- a/topics/clients.md +++ b/topics/clients.md @@ -46,14 +46,14 @@ However Redis does the following two things when serving clients: Maximum number of clients --- -In Redis 2.4 there was an hard-coded limit about the maximum number of clients -that was possible to handle simultaneously. +In Redis 2.4 there was a hard-coded limit for the maximum number of clients +that could be handled simultaneously. -In Redis 2.6 this limit is dynamic: by default is set to 10000 clients, unless +In Redis 2.6 this limit is dynamic: by default it is set to 10000 clients, unless otherwise stated by the `maxclients` directive in Redis.conf. 
-However Redis checks with the kernel what is the maximum number of file -descriptors that we are able to open (the *soft limit* is checked), if the +However, Redis checks with the kernel what is the maximum number of file +descriptors that we are able to open (the *soft limit* is checked). If the limit is smaller than the maximum number of clients we want to handle, plus 32 (that is the number of file descriptors Redis reserves for internal uses), then the number of maximum clients is modified by Redis to match the amount From f307f479b9940b3ca402aa1c90255d4bc039a676 Mon Sep 17 00:00:00 2001 From: Charles Chan Date: Tue, 4 Jul 2017 12:37:43 -0700 Subject: [PATCH 0749/2314] Fix issue #736: 128-bits requires 16 bytes of storage (#825) --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index bc8a7a3e29..12f5381b4c 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -875,7 +875,7 @@ the `+` and `-` strings. See the documentation for more information. This feature is important because it allows us to use sorted sets as a generic index. For example, if you want to index elements by a 128-bit unsigned integer argument, all you need to do is to add elements into a sorted -set with the same score (for example 0) but with an 8 byte prefix +set with the same score (for example 0) but with an 16 byte prefix consisting of **the 128 bit number in big endian**. 
Since numbers in big endian, when ordered lexicographically (in raw bytes order) are actually ordered numerically as well, you can ask for ranges in the 128 bit space, From 8b0e0ecda28225a551f7255b002956f9839fc778 Mon Sep 17 00:00:00 2001 From: Antonio Mallia Date: Tue, 4 Jul 2017 21:38:39 +0200 Subject: [PATCH 0750/2314] Fixed typo in data-types-intro.md (#815) --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 12f5381b4c..15d2ab8f1f 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -83,7 +83,7 @@ already stored into the key, in the case that the key already exists, even if the key is associated with a non-string value. So `SET` performs an assignment. Values can be strings (including binary data) of every kind, for instance you -can store a jpeg image inside a key. A value can't be bigger than 512 MB. +can store a jpeg image inside a value. A value can't be bigger than 512 MB. The `SET` command has interesting options, that are provided as additional arguments. For example, I may ask `SET` to fail if the key already exists, From 2d424a8323c0b32b6f1ec1a2a9ba7e7a4e8783dc Mon Sep 17 00:00:00 2001 From: Dylan Thacker-Smith Date: Tue, 4 Jul 2017 15:45:16 -0400 Subject: [PATCH 0751/2314] Expire only returns 0 if the key doesn't exist. (#710) It was only early versions that would fail when the key already had a timeout set. --- commands/expire.md | 4 +++- commands/expireat.md | 2 +- commands/pexpire.md | 2 +- commands/pexpireat.md | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/commands/expire.md b/commands/expire.md index c0193cc54e..2006a9dcce 100644 --- a/commands/expire.md +++ b/commands/expire.md @@ -46,12 +46,14 @@ command altering its value had the effect of removing the key entirely. This semantics was needed because of limitations in the replication layer that are now fixed. 
+`EXPIRE` would return 0 and not alter the timeout for a key with a timeout set. + @return @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist or the timeout could not be set. +* `0` if `key` does not exist. @examples diff --git a/commands/expireat.md b/commands/expireat.md index 87981300f6..a4430bb7c0 100644 --- a/commands/expireat.md +++ b/commands/expireat.md @@ -20,7 +20,7 @@ a given time in the future. @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist or the timeout could not be set (see: `EXPIRE`). +* `0` if `key` does not exist. @examples diff --git a/commands/pexpire.md b/commands/pexpire.md index cadaaff2b6..33d9f0bc75 100644 --- a/commands/pexpire.md +++ b/commands/pexpire.md @@ -6,7 +6,7 @@ specified in milliseconds instead of seconds. @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist or the timeout could not be set. +* `0` if `key` does not exist. @examples diff --git a/commands/pexpireat.md b/commands/pexpireat.md index f61f25038b..a15bb0a9a0 100644 --- a/commands/pexpireat.md +++ b/commands/pexpireat.md @@ -6,7 +6,7 @@ which the key will expire is specified in milliseconds instead of seconds. @integer-reply, specifically: * `1` if the timeout was set. -* `0` if `key` does not exist or the timeout could not be set (see: `EXPIRE`). +* `0` if `key` does not exist. @examples From f8e9ab0e3ebbc243efaeea76fdd7d6e0963a2fea Mon Sep 17 00:00:00 2001 From: Antoni Orfin Date: Tue, 4 Jul 2017 21:46:12 +0200 Subject: [PATCH 0752/2314] volatile-lru instead of allkeys-lru (#548) Fixed misspelled hint of using "allkeys-lru" in having both cache and storage keys in Redis. From d4cb70f4e46f890a1c3c5814a3cdcc4d257e9097 Mon Sep 17 00:00:00 2001 From: Mark Nunberg Date: Thu, 6 Jul 2017 06:20:35 -0700 Subject: [PATCH 0753/2314] redablooms is dead. long live rebloom. 
--- modules.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules.json b/modules.json index db797db9e3..4ff1122efc 100644 --- a/modules.json +++ b/modules.json @@ -81,12 +81,12 @@ }, { - "name": "redablooms", + "name": "rebloom", "license" : "AGPL", - "repository": "https://github.com/RedisLabsModules/redablooms", - "description": "Scalable, counting Bloom filters", - "authors": ["itamarhaber", "RedisLabs"], - "stars": 15 + "repository": "https://github.com/RedisLabsModules/rebloom", + "description": "Scalable Bloom filters", + "authors": ["mnunberg", "RedisLabs"], + "stars": 6 }, { From cdc590d21ae3f1c92109bf6413dd3c8f26b5c9d6 Mon Sep 17 00:00:00 2001 From: Stephen Nancekivell Date: Mon, 10 Jul 2017 22:01:17 +1000 Subject: [PATCH 0754/2314] remove scala-redis-client (#842) The page is down now. --- clients.json | 9 --------- 1 file changed, 9 deletions(-) diff --git a/clients.json b/clients.json index a75e023336..9f4389dc9c 100644 --- a/clients.json +++ b/clients.json @@ -558,15 +558,6 @@ "authors": ["pk11"] }, - { - "name": "scala-redis-client", - "language": "Scala", - "repository": "https://github.com/top10/scala-redis-client", - "description": "An idiomatic Scala client that keeps Jedis / Java hidden. 
Used in production at http://top10.com.", - "authors": ["thesmith", "heychinaski"], - "active": true - }, - { "name": "scredis", "language": "Scala", From e42c776e1fa68332731502066f88aab7b5dae700 Mon Sep 17 00:00:00 2001 From: Martin Angers Date: Mon, 10 Jul 2017 09:29:49 -0400 Subject: [PATCH 0755/2314] add Regis to the list of higher-level tools (#840) --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index 1fa42e22a8..a9026bde85 100644 --- a/tools.json +++ b/tools.json @@ -597,5 +597,12 @@ "repository": "https://github.com/vipshop/redis-migrate-tool", "description": "A convenient and useful tool for migrating data between redis groups.", "authors": ["diguo58"] + }, + { + "name": "Regis", + "language": "Swift", + "url": "https://www.harfangapps.com/regis/", + "description": "Full-featured Redis client for the Mac, available on the Mac App Store.", + "authors": ["harfangapps"] } ] From 0f7aee40b3d796bbda2764164944f41a1c9ed5f6 Mon Sep 17 00:00:00 2001 From: Danni Moiseyev Date: Mon, 10 Jul 2017 16:34:41 +0300 Subject: [PATCH 0756/2314] Add redis-timerseries to modules.json (#831) --- modules.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules.json b/modules.json index 4ff1122efc..d5cfa2d7ef 100644 --- a/modules.json +++ b/modules.json @@ -96,5 +96,14 @@ "description": "Online trainable neural networks as Redis data types.", "authors": ["antirez"], "stars": 1854 + }, + + { + "name": "redis-timerseries", + "license" : "AGPL", + "repository": "https://github.com/danni-m/redis-timeseries", + "description": "Time-series data structure for redis", + "authors": ["danni-m"], + "stars": 48 } ] From 682cdeb1601bc0fec228899d667447a7a50e23ed Mon Sep 17 00:00:00 2001 From: Ivan Baidakou Date: Mon, 10 Jul 2017 16:35:53 +0300 Subject: [PATCH 0757/2314] add bredis (c++ client) (#820) add bredis (c++ client) --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git 
a/clients.json b/clients.json index 9f4389dc9c..efd16d68ff 100644 --- a/clients.json +++ b/clients.json @@ -1322,6 +1322,14 @@ "active": true }, + { + "name": "bredis", + "language": "C++", + "repository": "https://github.com/basiliscos/cpp-bredis", + "description": "Boost::ASIO low-level redis client", + "active": true + }, + { "name": "mruby-redis", "language": "mruby", From daae482b6459ab109fa98400ffb372cac83083ea Mon Sep 17 00:00:00 2001 From: Tay Yang Shun Date: Mon, 10 Jul 2017 21:38:19 +0800 Subject: [PATCH 0758/2314] Fix typos in twitter-clone topic (#819) --- topics/twitter-clone.md | 54 ++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index 9dd37bc351..febe1e96d8 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -1,14 +1,14 @@ Tutorial: Design and implementation of a simple Twitter clone using PHP and the Redis key-value store === -This article describes the design and implementation of a [very simple Twitter clone](https://github.com/antirez/retwis) written using PHP with Redis as the only database. The programming community has traditionally considered key-value stores as a special purpose database that couldn't be used as a drop in replacement for a relational database for the development of web applications. This article will try to show that Redis data structures on top of a key-value layer are an effective data model to implement many kinds of applications. +This article describes the design and implementation of a [very simple Twitter clone](https://github.com/antirez/retwis) written using PHP with Redis as the only database. The programming community has traditionally considered key-value stores as a special purpose database that couldn't be used as a drop-in replacement for a relational database for the development of web applications. 
This article will try to show that Redis data structures on top of a key-value layer are an effective data model to implement many kinds of applications. -Before to continue, you may want to play a few seconds with [the Retwis online demo](http://retwis.redis.io), to check what we are going to actually +Before continuing, you may want to spend a few seconds playing with [the Retwis online demo](http://retwis.redis.io), to check what we are going to actually model. Long story short: it is a toy, but complex enough to be a foundation in order to learn how to create more complex applications. Note: the original version of this article was written in 2009 when Redis was -released. It was not exactly clear at the time that the Redis data model was +released. It was not exactly clear at that time that the Redis data model was suitable to write entire applications. Now after 5 years there are many cases of applications using Redis as their main store, so the goal of the article today is to be a tutorial for Redis newcomers. You'll learn how to design a simple @@ -16,7 +16,7 @@ data layout using Redis, and how to apply different data structures. Our Twitter clone, called [Retwis](http://retwis.antirez.com), is structurally simple, has very good performance, and can be distributed among any number of web and Redis servers with little efforts. You can find the source code [here](http://code.google.com/p/redis/downloads/list). -I use PHP for the example since it can be read by everybody. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. +I used PHP for the example since it can be read by everybody. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. A few clones exist (however not all the clones use the same data layout as the current version of this tutorial, so please, stick with the official PHP implementation for the sake of following the article better). 
@@ -24,9 +24,9 @@ implementation for the sake of following the article better). * [Retwis-RB](http://retwisrb.danlucraft.com/) is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft! Full source code is included of course, and a link to its Git repository appears in the footer of this article. The rest of this article targets PHP, but Ruby programmers can also check the Retwis-RB source code since it's conceptually very similar. * [Retwis-J](http://retwisj.cloudfoundry.com/) is a port of Retwis to Java, using the Spring Data Framework, written by [Costin Leau](http://twitter.com/costinl). Its source code can be found on [GitHub](https://github.com/SpringSource/spring-data-keyvalue-examples), and there is comprehensive documentation available at [springsource.org](http://j.mp/eo6z6I). -What is a Key-value store? +What is a key-value store? --- -The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In a sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. So, for example, I can use the command `SET` to store the value *bar* in the key *foo*: +The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In some sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. So, for example, I can use the command `SET` to store the value *bar* in the key *foo*: SET foo bar @@ -44,7 +44,7 @@ Other common operations provided by key-value stores are `DEL`, to delete a give Atomic operations --- -There is something special about `INCR`. 
Think about why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: +There is something special about `INCR`. You may wonder why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: x = GET foo x = x + 1 @@ -92,7 +92,7 @@ Sorted Sets, which are kind of a more capable version of Sets, it is better to start introducing Sets first (which are a very useful data structure per se), and later Sorted Sets. -There are more data types than just Lists. Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a set. Some examples will make it more clear. Keep in mind that `SADD` is the _add to set_ operation, `SREM` is the _remove from set_ operation, _sismember_ is the _test if member_ operation, and `SINTER` is the _perform intersection_ operation. Other operations are `SCARD` to get the cardinality (the number of elements) of a Set, and `SMEMBERS` to return all the members of a Set. +There are more data types than just Lists. Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a Set. Some examples will make it more clear. Keep in mind that `SADD` is the _add to set_ operation, `SREM` is the _remove from set_ operation, _sismember_ is the _test if member_ operation, and `SINTER` is the _perform intersection_ operation. Other operations are `SCARD` to get the cardinality (the number of elements) of a Set, and `SMEMBERS` to return all the members of a Set. 
SADD myset a SADD myset b @@ -108,7 +108,7 @@ Note that `SMEMBERS` does not return the elements in the same order we added the SADD mynewset hello SINTER myset mynewset => foo,b -`SINTER` can return the intersection between Sets but it is not limited to two sets. You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how SISMEMBER works: +`SINTER` can return the intersection between Sets but it is not limited to two Sets. You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how SISMEMBER works: SISMEMBER myset foo => 1 SISMEMBER myset notamember => 0 @@ -118,7 +118,7 @@ The Sorted Set data type Sorted Sets are similar to Sets: collection of elements. However in Sorted Sets each element is associated with a floating point value, called the -*element score*. Because of the score, elements inside a sorted set are +*element score*. Because of the score, elements inside a Sorted Set are ordered, since we can always compare two elements by score (and if the score happens to be the same, we compare the two elements as strings). @@ -171,7 +171,7 @@ Prerequisites If you haven't downloaded the [Retwis source code](https://github.com/antirez/retwis) already please grab it now. It contains a few PHP files, and also a copy of [Predis](https://github.com/nrk/predis), the PHP client library we use in this example. -Another thing you probably want is a working Redis server. Just get the source, build with make, run with ./redis-server, and you're ready to go. No configuration is required at all in order to play with or run Retwis on your computer. +Another thing you probably want is a working Redis server. Just get the source, build with `make`, run with `./redis-server`, and you're ready to go. No configuration is required at all in order to play with or run Retwis on your computer. Data layout --- @@ -186,8 +186,8 @@ Let's start with Users. 
We need to represent users, of course, with their userna *Note: you should use a hashed password in a real application, for simplicity we store the password in clear text.* -We use the `next_user_id` key in order to always get an unique ID for every new user. Then we use this unique ID to name the key holding a Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. -Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add an user, we also populate the `users` key, which is a Hash, with the username as field, and its ID as value. +We use the `next_user_id` key in order to always get a unique ID for every new user. Then we use this unique ID to name the key holding a Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. +Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add a user, we also populate the `users` key, which is a Hash, with the username as field, and its ID as value. HSET users antirez 1000 @@ -220,12 +220,12 @@ Another important thing we need is a place were we can add the updates to displa This list is basically the User timeline. We'll push the IDs of her/his own posts, and, the IDs of all the posts of created by the following users. -Basically we implement a write fanout. +Basically, we'll implement a write fanout. Authentication --- -OK, we have more or less everything about the user except for authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions, our system must be ready to be distributed among different web servers easily, so we'll keep the whole state in our Redis database. 
All we need is a random **unguessable** string to set as the cookie of an authenticated user, and a key that will contain the user ID of the client holding the string. +OK, we have more or less everything about the user except for authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions, as our system must be ready to be distributed among different web servers easily, so we'll keep the whole state in our Redis database. All we need is a random **unguessable** string to set as the cookie of an authenticated user, and a key that will contain the user ID of the client holding the string. We need two things in order to make this thing work in a robust way. First: the current authentication *secret* (the random unguessable string) @@ -240,12 +240,12 @@ authentication secrets to user IDs. HSET auths fea5e81ac8ca77622bed1c2132a021f9 1000 -In order to authenticate a user we'll do these simple steps ( see the `login.php` file in the Retwis source code): +In order to authenticate a user we'll do these simple steps (see the `login.php` file in the Retwis source code): - * Get the username and password via the login form + * Get the username and password via the login form. * Check if the `username` field actually exists in the `users` Hash. - * If it exists we have the user id, (i.e. 1000) - * Check if user:1000 password matches, if not, return an error message + * If it exists we have the user id, (i.e. 1000). + * Check if user:1000 password matches, if not, return an error message. * Ok authenticated! Set "fea5e81ac8ca77622bed1c2132a021f9" (the value of user:1000 `auth` field) as the "auth" cookie. This is the actual code: @@ -274,7 +274,7 @@ This is the actual code: This happens every time a user logs in, but we also need a function `isLoggedIn` in order to check if a given user is already authenticated or not. These are the logical steps preformed by the `isLoggedIn` function: - * Get the "auth" cookie from the user. 
If there is no cookie, the user is not logged in, of course. Let's call the value of the cookie `` + * Get the "auth" cookie from the user. If there is no cookie, the user is not logged in, of course. Let's call the value of the cookie ``. * Check if `` field in the `auths` Hash exists, and what the value (the user ID) is (1000 in the example). * In order for the system to be more robust, also verify that user:1000 auth field also matches. * OK the user is authenticated, and we loaded a bit of information in the $User global variable. @@ -307,9 +307,9 @@ The code is simpler than the description, possibly: return true; } -Having `loadUserInfo` as a separate function is overkill for our application, but it's a good approach in a complex application. The only thing that's missing from all the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in user:1000 `auth` field, remove the old authentication secret from the `auths` Hash., and add the new one. +Having `loadUserInfo` as a separate function is overkill for our application, but it's a good approach in a complex application. The only thing that's missing from all the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in user:1000 `auth` field, remove the old authentication secret from the `auths` Hash, and add the new one. -*Important:* the logout procedure explains why we don't just authenticate the user after looking up the authentication secret in the `auths` Hash, but double check it against user:1000 `auth` field. The true authentication string is the latter, while the `auths` Hash is just an authentication field that may even be volatile, or, if there are bugs in the program or a script gets interrupted, we may even end with multiple entries in the `auths` key pointing to the same user ID. 
The logout code is the following (logout.php): +*Important:* the logout procedure explains why we don't just authenticate the user after looking up the authentication secret in the `auths` Hash, but double check it against user:1000 `auth` field. The true authentication string is the latter, while the `auths` Hash is just an authentication field that may even be volatile, or, if there are bugs in the program or a script gets interrupted, we may even end with multiple entries in the `auths` key pointing to the same user ID. The logout code is the following (`logout.php`): include("retwis.php"); @@ -339,7 +339,7 @@ Updates, also known as posts, are even simpler. In order to create a new post in INCR next_post_id => 10343 HMSET post:10343 user_id $owner_id time $time body "I'm having fun with Retwis" -As you can see each post is just represented by a Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally the body of the post, which is, the actual status message. +As you can see each post is just represented by a Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally, the body of the post, which is, the actual status message. After we create a post and we obtain the post ID, we need to LPUSH the ID in the timeline of every user that is following the author of the post, and of course in the list of posts of the author itself (everybody is virtually following herself/himself). This is the file `post.php` that shows how this is performed: @@ -371,7 +371,7 @@ The core of the function is the `foreach` loop. We use `ZRANGE` to get all the f Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an `LPUSH` to the `timeline` List. 
Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. -There is an interesting thing to notice in the code above: we use a new +There is an interesting thing to notice in the code above: we used a new command called `LTRIM` after we perform the `LPUSH` operation in the global timeline. This is used in order to trim the list to just 1000 elements. The global timeline is actually only used in order to show a few posts in the @@ -426,14 +426,14 @@ It is not hard, but we did not yet check how we create following / follower rela ZADD following:1000 5000 ZADD followers:5000 1000 -Note the same pattern again and again. In theory with a relational database the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff. For example, using `ZINTERSTORE` we can have the intersection of 'following' of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that. +Note the same pattern again and again. In theory with a relational database, the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. 
This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff. For example, using `ZINTERSTORE` we can have the intersection of 'following' of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that. You can find the code that sets or removes a following / follower relation in the `follow.php` file. Making it horizontally scalable --- -Gentle reader, if you reached this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an Apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware. +Gentle reader, if you read till this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an Apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware. However you can't go with a single server forever, how do you scale a key-value store? @@ -443,7 +443,7 @@ simple: you may use client-side sharding, or something like a sharding proxy like Twemproxy, or the upcoming Redis Cluster. 
To know more about those topics please read -[our documentation about sharding](/topics/partitioning). However here the point +[our documentation about sharding](/topics/partitioning). However, the point here to stress is that in a key-value store, if you design with care, the data set is split among **many independent small keys**. To distribute those keys to multiple nodes is more straightforward and predictable compared to using From 59d9e8d9baaf6abb27634c32a651065bbd438e84 Mon Sep 17 00:00:00 2001 From: jzh800 Date: Mon, 10 Jul 2017 21:39:18 +0800 Subject: [PATCH 0759/2314] update r3c introduction, add support password (#816) update r3c introduction, add support password --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index efd16d68ff..75f350d40e 100644 --- a/clients.json +++ b/clients.json @@ -1432,7 +1432,7 @@ "name": "r3c", "language": "C++", "repository": "https://github.com/eyjian/r3c", - "description": "A Redis Cluster C++ Client, based on hiredis and support standalone, it's easy to make and use, not depends on C++11 or later.", + "description": "Redis Cluster C++ Client, based on hiredis, support password and standalone, it's easy to make and use, not depends on C++11 or later.", "authors": ["eyjian"], "active": true }, From e5a4ec0aebb50d5f1711a6bc36a4647750643cbb Mon Sep 17 00:00:00 2001 From: Homer Huang Date: Sun, 2 Apr 2017 18:13:24 +0800 Subject: [PATCH 0760/2314] Add hiredispool. --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 6a5d578a22..78a94a37d0 100644 --- a/clients.json +++ b/clients.json @@ -1456,6 +1456,14 @@ "active": true }, + { + "name": "hiredispool", + "language": "C", + "repository": "https://github.com/aclisp/hiredispool", + "description": "Provides connection pooling and auto-reconnect for hiredis. 
It is also minimalistic and easy to do customization.", + "active": true + }, + { "name": "oxblood", "language": "Ruby", From 7bedd64feb8740f80c96b3252226b5c384daceea Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Mon, 10 Jul 2017 15:42:34 +0200 Subject: [PATCH 0761/2314] Remove whitespace --- clients.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clients.json b/clients.json index 474621c4ad..3c69167801 100644 --- a/clients.json +++ b/clients.json @@ -490,7 +490,7 @@ "description": "Async minimal redis client for tornado ioloop designed for performances (use C hiredis parser)", "active": true }, - + { "name": "brukva", "language": "Python", @@ -1276,7 +1276,7 @@ "authors": ["esilverberg"], "active": true }, - + { "name": "Rackdis", "language": "Racket", @@ -1329,7 +1329,7 @@ "description": "Boost::ASIO low-level redis client", "active": true }, - + { "name": "mruby-redis", "language": "mruby", @@ -1338,7 +1338,7 @@ "authors": ["matsumotory"], "active": true }, - + { "name": "mruby-hiredis", "language": "mruby", @@ -1436,7 +1436,7 @@ "authors": ["eyjian"], "active": true }, - + { "name": "rebridge", "language": "Node.js", @@ -1471,7 +1471,7 @@ "authors": ["etehtsea"], "active": true }, - + { "name": "oredis", "language": "PL/SQL", From 1d81b4158419fd398705665b97180ab22b3b1e35 Mon Sep 17 00:00:00 2001 From: Charles Chan Date: Mon, 10 Jul 2017 06:43:15 -0700 Subject: [PATCH 0762/2314] Fix grammar in documentation for eviction polices. (#824) Also make the statements more consistents among one another. --- topics/lru-cache.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/lru-cache.md b/topics/lru-cache.md index 19c093de4b..dadbcb3023 100644 --- a/topics/lru-cache.md +++ b/topics/lru-cache.md @@ -47,11 +47,11 @@ configured using the `maxmemory-policy` configuration directive. 
The following policies are available: * **noeviction**: return errors when the memory limit was reached and the client is trying to execute commands that could result in more memory to be used (most write commands, but `DEL` and a few more exceptions). -* **allkeys-lru**: evict keys trying to remove the less recently used (LRU) keys first, in order to make space for the new data added. -* **volatile-lru**: evict keys trying to remove the less recently used (LRU) keys first, but only among keys that have an **expire set**, in order to make space for the new data added. -* **allkeys-random**: evict random keys in order to make space for the new data added. -* **volatile-random**: evict random keys in order to make space for the new data added, but only evict keys with an **expire set**. -* **volatile-ttl**: In order to make space for the new data, evict only keys with an **expire set**, and try to evict keys with a shorter time to live (TTL) first. +* **allkeys-lru**: evict keys by trying to remove the less recently used (LRU) keys first, in order to make space for the new data added. +* **volatile-lru**: evict keys by trying to remove the less recently used (LRU) keys first, but only among keys that have an **expire set**, in order to make space for the new data added. +* **allkeys-random**: evict keys randomly in order to make space for the new data added. +* **volatile-random**: evict keys randomly in order to make space for the new data added, but only evict keys with an **expire set**. +* **volatile-ttl**: evict keys with an **expire set**, and try to evict keys with a shorter time to live (TTL) first, in order to make space for the new data added. The policies **volatile-lru**, **volatile-random** and **volatile-ttl** behave like **noeviction** if there are no keys to evict matching the prerequisites. 
From eda06668bf1e28370f1d5200c3cf7e68dc849f3a Mon Sep 17 00:00:00 2001 From: Adam Lev-Libfeld Date: Sun, 14 May 2017 23:31:38 +0300 Subject: [PATCH 0763/2314] Update modules.json --- modules.json | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/modules.json b/modules.json index d5cfa2d7ef..e2e9183962 100644 --- a/modules.json +++ b/modules.json @@ -105,5 +105,24 @@ "description": "Time-series data structure for redis", "authors": ["danni-m"], "stars": 48 + }, + + { + "name": "ReDe", + "license" : "MIT", + "repository": "https://github.com/TamarLabs/ReDe", + "description": "Low Latancy timed queues (Dehydrators) as Redis data types.", + "authors": ["daTokenizer"], + "stars": 6 + }, + + { + "name": "commentDis", + "license" : "MIT", + "repository": "https://github.com/picotera/commentDis", + "description": "Add comment syntax to your redis-cli scripts.", + "authors": ["daTokenizer"], + "stars": 1 } + ] From cecf74a6f12e9b5bdfb973dbcb682d9fc8b85ca2 Mon Sep 17 00:00:00 2001 From: Harmen Date: Sun, 16 Apr 2017 10:14:15 +0200 Subject: [PATCH 0764/2314] add miniredis to tools.json --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index a9026bde85..e2c67c3d39 100644 --- a/tools.json +++ b/tools.json @@ -604,5 +604,12 @@ "url": "https://www.harfangapps.com/regis/", "description": "Full-featured Redis client for the Mac, available on the Mac App Store.", "authors": ["harfangapps"] + }, + { + "name": "miniredis", + "language": "Go", + "repository": "https://github.com/alicebob/miniredis", + "description": "Pure Go Redis server for Go unittests", + "authors": [] } ] From a6e7f4db38d689ba42aa90e1bdd0a31b6a972c94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=83=91=E6=A0=91=E6=96=B0?= Date: Thu, 8 Dec 2016 23:10:40 +0800 Subject: [PATCH 0765/2314] C++ redis client of acl is very efficient and stable, used by many people in their projects. 
I've rewrited the summary about this C++ redis client. --- clients.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index 3c69167801..cf6c8e3028 100644 --- a/clients.json +++ b/clients.json @@ -805,12 +805,12 @@ }, { - "name": "redis-client for C++", + "name": "acl-redis", "language": "C++", "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", - "description": "Full Redis client commands, one redis command, one redis function", - "authors": [], + "description": "Standard C++ Redis Client with high performance and stl-like interface, supporting Redis Cluster, thread-safe", + "authors": ["zhengshuxin"], "active": true }, From d9a7f1a41e659efc26ab2b6e0bde17c0447c54dd Mon Sep 17 00:00:00 2001 From: btegs Date: Fri, 20 Jan 2017 13:41:13 -0500 Subject: [PATCH 0766/2314] Added aredis --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index cf6c8e3028..4bba83afb6 100644 --- a/clients.json +++ b/clients.json @@ -1479,5 +1479,14 @@ "description": "Redis client library for Oracle PL/SQL. 
This support Redis cluster and asynchronous execution", "authors": ["SeyoungLee"], "active": true + }, + + { + "name": "aredis", + "language": "Python", + "repository": "https://github.com/NoneGG/aredis", + "description": "An efficient and user-friendly async redis client ported from redis-py.", + "authors": [], + "active": true } ] From 98cfc2840e9b6cf5e45be6d2ca5025789adf93bc Mon Sep 17 00:00:00 2001 From: Tevin Date: Mon, 10 Jul 2017 22:00:09 +0800 Subject: [PATCH 0767/2314] Refine explanation of MIGRATE (#796) --- topics/cluster-spec.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index c53f4f512b..0d8e5452c6 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -486,9 +486,9 @@ there are no race conditions). This is how `MIGRATE` works: MIGRATE target_host target_port key target_database id timeout `MIGRATE` will connect to the target instance, send a serialized version of -the key, and once an OK code is received will delete the old key from its own -dataset. From the point of view of an external client a key exists either -in A or B at any given time. +the key, and once an OK code is received, the old key from its own dataset +will be deleted. From the point of view of an external client a key exists +either in A or B at any given time. 
In Redis Cluster there is no need to specify a database other than 0, but `MIGRATE` is a general command that can be used for other tasks not From 144a24c722353191fb172b82db07b4185847bc65 Mon Sep 17 00:00:00 2001 From: Phill Date: Mon, 10 Jul 2017 16:02:35 +0200 Subject: [PATCH 0768/2314] Added RcppRedis and Redux R Packages (#798) * Added RcppRedis and Redux R Packages * Fixed Broken JSON Missing comma no longer missing --- clients.json | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/clients.json b/clients.json index 4bba83afb6..ba018430af 100644 --- a/clients.json +++ b/clients.json @@ -1471,6 +1471,26 @@ "authors": ["etehtsea"], "active": true }, + + { + "name": "RcppRedis", + "language": "R", + "url": "https://cran.rstudio.com/web/packages/RcppRedis/index.html", + "repository": "https://github.com/eddelbuettel/rcppredis", + "description": "R interface to Redis using the hiredis library.", + "authors": ["eddelbuettel"], + "active": true + }, + + { + "name": "Redux", + "language": "R", + "url": "http://richfitz.github.io/redux/", + "repository": "https://github.com/richfitz/redux", + "description": "Provides a low-level interface to Redis, allowing execution of arbitrary Redis commands with almost no interface.", + "authors": ["rgfitzjohn"], + "active": true + }, { "name": "oredis", From 3979c192b538d34ccc3657ab3cde52728f50b848 Mon Sep 17 00:00:00 2001 From: Ed Costello Date: Mon, 10 Jul 2017 10:04:22 -0400 Subject: [PATCH 0769/2314] Copy edits for typos/misspellings (#811) --- commands/command.md | 2 +- topics/ARM.md | 4 ++-- topics/indexes.md | 4 ++-- topics/rediscli.md | 2 +- topics/replication.md | 2 +- topics/twitter-clone.md | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/commands/command.md b/commands/command.md index 03122cee76..88c0ef5a7a 100644 --- a/commands/command.md +++ b/commands/command.md @@ -99,7 +99,7 @@ Command flags is @array-reply containing one or more status replies: 
Some Redis commands have no predetermined key locations. For those commands, flag `movablekeys` is added to the command flags @array-reply. Your Redis -Cluster client needs to parse commands marked `movabkeleys` to locate all relevant key positions. +Cluster client needs to parse commands marked `movablekeys` to locate all relevant key positions. Complete list of commands currently requiring key location parsing: diff --git a/topics/ARM.md b/topics/ARM.md index 67a5b62fec..7c649b5e1e 100644 --- a/topics/ARM.md +++ b/topics/ARM.md @@ -35,13 +35,13 @@ run as expected. There is nothing special in the process. The only difference is that by default, Redis uses the libc allocator instead of defaulting to Jemalloc as it does in other Linux based environments. This is because we believe -that for the small use cases inside embeddeed devices, memory fragmentation +that for the small use cases inside embedded devices, memory fragmentation is unlikely to be a problem. Moreover Jemalloc on ARM may not be as tested as the libc allocator. ## Performance -Performance testing of Redis was performend in the Raspberry Pi 3 and in the +Performance testing of Redis was performed in the Raspberry Pi 3 and in the original model B Pi. The difference between the two Pis in terms of delivered performance is quite big. The benchmarks were performed via the loopback interface, since most use cases will probably use Redis from within diff --git a/topics/indexes.md b/topics/indexes.md index 0e4b50582f..a7dd0b0c30 100644 --- a/topics/indexes.md +++ b/topics/indexes.md @@ -644,7 +644,7 @@ What we do is to start with the left bottom corner of our search box, which is 50,100, and find the first range by substituting the last 6 bits in each number with 0. Then we do the same with the right top corner. 
-With two trivial nested for loops where we increment only the significative +With two trivial nested for loops where we increment only the significant bits, we can find all the squares between these two. For each square we convert the two numbers into our interleaved representation, and create the range using the converted representation as our start, and the same @@ -687,7 +687,7 @@ Turning this into code is simple. Here is a Ruby example: While non immediately trivial this is a very useful indexing strategy that in the future may be implemented in Redis in a native way. -For now, the good thing is that the complexity may be easily encapsualted +For now, the good thing is that the complexity may be easily encapsulated inside a library that can be used in order to perform indexing and queries. One example of such library is [Redimension](https://github.com/antirez/redimension), a proof of concept Ruby library which indexes N-dimensional data inside Redis using the technique described here. diff --git a/topics/rediscli.md b/topics/rediscli.md index 27ff39f6a5..8373bc0b0e 100644 --- a/topics/rediscli.md +++ b/topics/rediscli.md @@ -679,7 +679,7 @@ name is simply `--slave`. This is how it works: "SELECT","0" "set","foo","bar" "PING" - "incr","myconuter" + "incr","mycounter" The command begins by discarding the RDB file of the first synchronization and then logs each command received as in CSV format. diff --git a/topics/replication.md b/topics/replication.md index 83737c92bd..518fabc3c5 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -155,7 +155,7 @@ use case for storing ephemeral data in writable slaves. For example computing slow Set or Sorted set operations and storing them into local keys is an use case for writable slaves that was observed multiple times. -However note that **writable slaves before version 4.0 were uncapable of expiring keys with a time to live set**. 
This means that if you use `EXPIRE` or other commands that set a maximum TTL for a key, the key will leak, and while you may no longer see it while accessing it with read commands, you will see it in the count of keys and it will still use memory. So in general mixing writable slaves (previous version 4.0) and keys with TTL is going to create issues. +However note that **writable slaves before version 4.0 were incapable of expiring keys with a time to live set**. This means that if you use `EXPIRE` or other commands that set a maximum TTL for a key, the key will leak, and while you may no longer see it while accessing it with read commands, you will see it in the count of keys and it will still use memory. So in general mixing writable slaves (previous version 4.0) and keys with TTL is going to create issues. Redis 4.0 RC3 and greater versions totally solve this problem and now writable slaves are able to evict keys with TTL as masters do, with the exceptions diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index febe1e96d8..e27f350831 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -265,7 +265,7 @@ This is the actual code: goback("Wrong username or password"); $realpassword = $r->hget("user:$userid","password"); if ($realpassword != $password) - goback("Wrong useranme or password"); + goback("Wrong username or password"); # Username / password OK, set the cookie and redirect to index.php $authsecret = $r->hget("user:$userid","auth"); From 935b99d6aaa64a312099fd0cce8b4c468aa0c144 Mon Sep 17 00:00:00 2001 From: Shiem Edelbrock Date: Wed, 6 Jul 2016 17:28:13 -0700 Subject: [PATCH 0770/2314] Grammar changes --- topics/replication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/replication.md b/topics/replication.md index 518fabc3c5..389e74ad03 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -231,7 +231,7 @@ able to work: 2. 
However because of master-driven expire, sometimes slaves may still have in memory keys that are already logically expired, since the master was not able to provide the `DEL` command in time. In order to deal with that the slave uses its logical clock in order to report that a key does not exist **only for read operations** that don't violate the consistency of the data set (as new commands from the master will arrive). In this way slaves avoid to report logically expired keys are still existing. In practical terms, an HTML fragments cache that uses slaves to scale will avoid returning items that are already older than the desired time to live. 3. During Lua scripts executions no keys expires are performed. As a Lua script runs, conceptually the time in the master is frozen, so that a given key will either exist or not for all the time the script runs. This prevents keys to expire in the middle of a script, and is needed in order to send the same script to the slave in a way that is guaranteed to have the same effects in the data set. -As it is expected, once a slave is turned into a master because of a fail over, it start to expire keys in an independent way without requiring help from its old master. +Once a slave is promoted to a master it will start to expire keys independently, and will not require any help from its old master. 
Configuring replication in Docker and NAT --- From 3bc019861ebc76b1f3db6c76ad90112e2298bbf6 Mon Sep 17 00:00:00 2001 From: spccold Date: Mon, 10 Jul 2017 22:34:36 +0800 Subject: [PATCH 0771/2314] corrent cluster-spec.md about 'Practical example of configuration epoch usefulness during partitions' (#806) --- topics/cluster-spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-spec.md b/topics/cluster-spec.md index 0d8e5452c6..b9a33e9259 100644 --- a/topics/cluster-spec.md +++ b/topics/cluster-spec.md @@ -895,7 +895,7 @@ This section illustrates how the epoch concept is used to make the slave promoti At this point B is down and A is available again with a role of master (actually `UPDATE` messages would reconfigure it promptly, but here we assume all `UPDATE` messages were lost). At the same time, slave C will try to get elected in order to fail over B. This is what happens: -1. B will try to get elected and will succeed, since for the majority of masters its master is actually down. It will obtain a new incremental `configEpoch`. +1. C will try to get elected and will succeed, since for the majority of masters its master is actually down. It will obtain a new incremental `configEpoch`. 2. A will not be able to claim to be the master for its hash slots, because the other nodes already have the same hash slots associated with a higher configuration epoch (the one of B) compared to the one published by A. 3. So, all the nodes will upgrade their table to assign the hash slots to C, and the cluster will continue its operations. From 2645642648655ee6dfe61038332ff8c7a0b044b9 Mon Sep 17 00:00:00 2001 From: cbarbu Date: Mon, 10 Jul 2017 16:38:58 +0200 Subject: [PATCH 0772/2314] Adding RedisCli for language R. 
(#765) --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index ba018430af..ba607376b8 100644 --- a/clients.json +++ b/clients.json @@ -1039,6 +1039,14 @@ "description": "Redis client based on Finagle", "authors": [] }, + { + "name": "RedisCli", + "language": "R", + "repository": "https://bitbucket.org/cmbce/r-package-rediscli", + "description": "Basic client passing a (batch of) command(s) to redis-cli, getting back a (list of) character vector(s).", + "authors": ["CorentinBarbu"], + "active": true + }, { "name": "rredis", From 0b0167090bd57da92aac8d13bc15b34d0a480bfd Mon Sep 17 00:00:00 2001 From: Eugene Ponizovsky Date: Fri, 29 Jul 2016 11:23:31 +0300 Subject: [PATCH 0773/2314] Perl client AnyEvent::Redis::RipeRedis will continue to develop as AnyEvent::RipeRedis AnyEvent::RipeRedis is the new incarnation of AnyEvent::Redis::RipeRedis. AnyEvent::Redis::RipeRedis will no longer be supported and must not be used in new code. --- clients.json | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/clients.json b/clients.json index ba018430af..008e5a63d6 100644 --- a/clients.json +++ b/clients.json @@ -341,12 +341,13 @@ }, { - "name": "AnyEvent::Redis::RipeRedis", + "name": "AnyEvent::RipeRedis", "language": "Perl", - "url": "http://search.cpan.org/dist/AnyEvent-Redis-RipeRedis/", - "repository": "https://github.com/iph0/AnyEvent-Redis-RipeRedis", - "description": "Flexible non-blocking Redis client with reconnect feature", - "authors": ["iph"], + "url": "http://search.cpan.org/dist/AnyEvent-RipeRedis/", + "repository": "https://github.com/iph0/AnyEvent-RipeRedis", + "description": "Flexible non-blocking Redis client", + "authors": ["iph0"], + "recommended": true, "active": true }, From 3fd4cf93246cb821a094db87c8d554beaa4e4459 Mon Sep 17 00:00:00 2001 From: Eugene Ponizovsky Date: Tue, 27 Sep 2016 11:14:38 +0300 Subject: [PATCH 0774/2314] Added Perl client 
AnyEvent::RipeRedis::Cluster --- clients.json | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 008e5a63d6..8a4877a292 100644 --- a/clients.json +++ b/clients.json @@ -347,7 +347,16 @@ "repository": "https://github.com/iph0/AnyEvent-RipeRedis", "description": "Flexible non-blocking Redis client", "authors": ["iph0"], - "recommended": true, + "active": true + }, + + { + "name": "AnyEvent::RipeRedis::Cluster", + "language": "Perl", + "url": "http://search.cpan.org/dist/AnyEvent-RipeRedis-Cluster/", + "repository": "https://github.com/iph0/AnyEvent-RipeRedis-Cluster", + "description": "Non-blocking Redis Cluster client", + "authors": ["iph0"], "active": true }, From 2d94cbd9861f199d2de4a9ab12f609927ff9119b Mon Sep 17 00:00:00 2001 From: reidwmulkey Date: Tue, 26 Apr 2016 17:33:28 -0500 Subject: [PATCH 0775/2314] adding entry in tools.json for noncis --- tools.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools.json b/tools.json index e2c67c3d39..73f2cf4e3a 100644 --- a/tools.json +++ b/tools.json @@ -611,5 +611,12 @@ "repository": "https://github.com/alicebob/miniredis", "description": "Pure Go Redis server for Go unittests", "authors": [] + }, + { + "name": "noncis", + "language": "Javascript", + "repository": "https://github.com/reidwmulkey/noncis", + "description": "Synchronizes nonces across node instances.", + "authors": [] } ] From a99634f91f05717e6754ef5eb7c84cc2f199f722 Mon Sep 17 00:00:00 2001 From: Bo Date: Mon, 10 Jul 2017 12:59:37 -0400 Subject: [PATCH 0776/2314] Consolidate PR #836 and #837 into an updated branch (#843) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit facil.io includes an asynchronous Redis client as well as a RESP parser/formatter that could be used as an independent module. 
The parser and client were written from the ground up and are licensed under the MIT license. This places fail.io in a unique position, the could provide an MIT licensing option where required. This is in contrast to most of the other clients, that are based on hiredis and require the BSD-3-clause license. This might not be the fastest client or parser (it's more concerned with protecting itself from bad code than with being optimized), but it is (to the best of my knowledge) the only MIT licensed option. Thanks! 👍🏻 === Iodine is a Ruby HTTP / Websocket server with native Pub/Sub (built using the facil.io C framework). Iodine includes an integrated Redis client that allows Pub/Sub to be extended across machine boundaries (the native Pub/Sub is limited to the process cluster). Thanks! 🙏🏻 --- tools.json | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tools.json b/tools.json index 73f2cf4e3a..11924a7fb7 100644 --- a/tools.json +++ b/tools.json @@ -598,6 +598,21 @@ "description": "A convenient and useful tool for migrating data between redis groups.", "authors": ["diguo58"] }, + { + "name": "facil.io", + "language": "C", + "url": "http://facil.io/", + "repository": "https://github.com/boazsegev/facil.io", + "description": "facil.io includes an asynchronous Redis client as well as a RESP parser/formatter that can be used independently. It's MIT licensed and doesn't use hiredis.", + "authors": ["bowildmusic"] + }, + { + "name": "iodine", + "language": "Ruby", + "repository": "https://github.com/boazsegev/iodine", + "description": "Iodine is an HTTP / Websocket server with native pub/sub support. 
Iodine includes an integrated Redis client that provides Pub/Sub scaling beyond machine boundaries.", + "authors": ["bowildmusic"] + }, { "name": "Regis", "language": "Swift", From 734af8b2c5a6d453bd0ce25d00baf1a2b962ebc9 Mon Sep 17 00:00:00 2001 From: Dongqi Xue Date: Mon, 10 Jul 2017 14:33:15 -0700 Subject: [PATCH 0777/2314] Fix a typo in replication.md --- topics/replication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/replication.md b/topics/replication.md index 389e74ad03..3dfe767964 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -271,7 +271,7 @@ slaves and so forth. Partial resynchronizations after restarts and failovers --- -Since Redis 4.0, when an instance instance is promoted to master after a failover, +Since Redis 4.0, when an instance is promoted to master after a failover, it will be still able to perform a partial resynchronization with the slaves of the old master. To do so, the slave remembers the old replication ID and offset of its former master, so can provide part of the backlog to the connecting From 9779d072411b08a2762da75ad9e2816f914cffa8 Mon Sep 17 00:00:00 2001 From: Hamid Date: Wed, 12 Jul 2017 12:55:28 +0430 Subject: [PATCH 0778/2314] Add async-redis for c++ clients (#835) --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index bea68dbfad..680485e5e2 100644 --- a/clients.json +++ b/clients.json @@ -814,6 +814,16 @@ "authors": ["loopole"] }, + { + "name": "async-redis", + "language": "C++", + "url": "https://github.com/hamidr/async-redis", + "repository": "https://github.com/hamidr/async-redis", + "description": "An async redis library for C++ based on libevpp/boost-asio", + "authors": ["hamidr_"], + "active": true + }, + { "name": "acl-redis", "language": "C++", From 2a5204ad311cd45b2ee00570751fef33c579ac64 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 14 Jul 2017 11:34:48 +0200 Subject: [PATCH 
0779/2314] Modules API doc added. --- topics/modules-api-ref.md | 1427 ++++++++++++++++++++++++++++++++ topics/modules-blocking-ops.md | 265 ++++++ topics/modules-intro.md | 857 +++++++++++++++++++ topics/modules-native-types.md | 379 +++++++++ 4 files changed, 2928 insertions(+) create mode 100644 topics/modules-api-ref.md create mode 100644 topics/modules-blocking-ops.md create mode 100644 topics/modules-intro.md create mode 100644 topics/modules-native-types.md diff --git a/topics/modules-api-ref.md b/topics/modules-api-ref.md new file mode 100644 index 0000000000..c99a80da7d --- /dev/null +++ b/topics/modules-api-ref.md @@ -0,0 +1,1427 @@ +# Modules API reference + +## `RedisModule_Alloc` + + void *RedisModule_Alloc(size_t bytes); + +Use like malloc(). Memory allocated with this function is reported in +Redis INFO memory, used for keys eviction according to maxmemory settings +and in general is taken into account as memory allocated by Redis. +You should avoid using malloc(). + +## `RedisModule_Calloc` + + void *RedisModule_Calloc(size_t nmemb, size_t size); + +Use like calloc(). Memory allocated with this function is reported in +Redis INFO memory, used for keys eviction according to maxmemory settings +and in general is taken into account as memory allocated by Redis. +You should avoid using calloc() directly. + +## `RedisModule_Realloc` + + void* RedisModule_Realloc(void *ptr, size_t bytes); + +Use like realloc() for memory obtained with `RedisModule_Alloc()`. + +## `RedisModule_Free` + + void RedisModule_Free(void *ptr); + +Use like free() for memory obtained by `RedisModule_Alloc()` and +`RedisModule_Realloc()`. However you should never try to free with +`RedisModule_Free()` memory allocated with malloc() inside your module. + +## `RedisModule_Strdup` + + char *RedisModule_Strdup(const char *str); + +Like strdup() but returns memory allocated with `RedisModule_Alloc()`. 
+ +## `RedisModule_PoolAlloc` + + void *RedisModule_PoolAlloc(RedisModuleCtx *ctx, size_t bytes); + +Return heap allocated memory that will be freed automatically when the +module callback function returns. Mostly suitable for small allocations +that are short living and must be released when the callback returns +anyway. The returned memory is aligned to the architecture word size +if at least word size bytes are requested, otherwise it is just +aligned to the next power of two, so for example a 3 bytes request is +4 bytes aligned while a 2 bytes request is 2 bytes aligned. + +There is no realloc style function since when this is needed to use the +pool allocator is not a good idea. + +The function returns NULL if `bytes` is 0. + +## `RedisModule_GetApi` + + int RedisModule_GetApi(const char *funcname, void **targetPtrPtr); + +Lookup the requested module API and store the function pointer into the +target pointer. The function returns `REDISMODULE_ERR` if there is no such +named API, otherwise `REDISMODULE_OK`. + +This function is not meant to be used by modules developer, it is only +used implicitly by including redismodule.h. + +## `RedisModule_IsKeysPositionRequest` + + int RedisModule_IsKeysPositionRequest(RedisModuleCtx *ctx); + +Return non-zero if a module command, that was declared with the +flag "getkeys-api", is called in a special way to get the keys positions +and not to get executed. Otherwise zero is returned. 
+ +## `RedisModule_KeyAtPos` + + void RedisModule_KeyAtPos(RedisModuleCtx *ctx, int pos); + +When a module command is called in order to obtain the position of +keys, since it was flagged as "getkeys-api" during the registration, +the command implementation checks for this special call using the +`RedisModule_IsKeysPositionRequest()` API and uses this function in +order to report keys, like in the following example: + + if (RedisModule_IsKeysPositionRequest(ctx)) { + RedisModule_KeyAtPos(ctx,1); + RedisModule_KeyAtPos(ctx,2); + } + + Note: in the example below the get keys API would not be needed since + keys are at fixed positions. This interface is only used for commands + with a more complex structure. + +## `RedisModule_CreateCommand` + + int RedisModule_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep); + +Register a new command in the Redis server, that will be handled by +calling the function pointer 'func' using the RedisModule calling +convention. The function returns `REDISMODULE_ERR` if the specified command +name is already busy or a set of invalid flags were passed, otherwise +`REDISMODULE_OK` is returned and the new command is registered. + +This function must be called during the initialization of the module +inside the `RedisModule_OnLoad()` function. Calling this function outside +of the initialization function is not defined. + +The command function type is the following: + + int MyCommand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc); + +And is supposed to always return `REDISMODULE_OK`. + +The set of flags 'strflags' specify the behavior of the command, and should +be passed as a C string compoesd of space separated words, like for +example "write deny-oom". The set of flags are: + +* **"write"**: The command may modify the data set (it may also read + from it). 
+* **"readonly"**: The command returns data from keys but never writes. +* **"admin"**: The command is an administrative command (may change + replication or perform similar tasks). +* **"deny-oom"**: The command may use additional memory and should be + denied during out of memory conditions. +* **"deny-script"**: Don't allow this command in Lua scripts. +* **"allow-loading"**: Allow this command while the server is loading data. + Only commands not interacting with the data set + should be allowed to run in this mode. If not sure + don't use this flag. +* **"pubsub"**: The command publishes things on Pub/Sub channels. +* **"random"**: The command may have different outputs even starting + from the same input arguments and key values. +* **"allow-stale"**: The command is allowed to run on slaves that don't + serve stale data. Don't use if you don't know what + this means. +* **"no-monitor"**: Don't propoagate the command on monitor. Use this if + the command has sensible data among the arguments. +* **"fast"**: The command time complexity is not greater + than O(log(N)) where N is the size of the collection or + anything else representing the normal scalability + issue with the command. +* **"getkeys-api"**: The command implements the interface to return + the arguments that are keys. Used when start/stop/step + is not enough because of the command syntax. +* **"no-cluster"**: The command should not register in Redis Cluster + since is not designed to work with it because, for + example, is unable to report the position of the + keys, programmatically creates key names, or any + other reason. + +## `RedisModule_SetModuleAttribs` + + void RedisModule_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int apiver); + +Called by `RM_Init()` to setup the `ctx->module` structure. + +This is an internal function, Redis modules developers don't need +to use it. 
+ +## `RedisModule_Milliseconds` + + long long RedisModule_Milliseconds(void); + +Return the current UNIX time in milliseconds. + +## `RedisModule_AutoMemory` + + void RedisModule_AutoMemory(RedisModuleCtx *ctx); + +Enable automatic memory management. See API.md for more information. + +The function must be called as the first function of a command implementation +that wants to use automatic memory. + +## `RedisModule_CreateString` + + RedisModuleString *RedisModule_CreateString(RedisModuleCtx *ctx, const char *ptr, size_t len); + +Create a new module string object. The returned string must be freed +with `RedisModule_FreeString()`, unless automatic memory is enabled. + +The string is created by copying the `len` bytes starting +at `ptr`. No reference is retained to the passed buffer. + +## `RedisModule_CreateStringPrintf` + + RedisModuleString *RedisModule_CreateStringPrintf(RedisModuleCtx *ctx, const char *fmt, ...); + +Create a new module string object from a printf format and arguments. +The returned string must be freed with `RedisModule_FreeString()`, unless +automatic memory is enabled. + +The string is created using the sds formatter function sdscatvprintf(). + +## `RedisModule_CreateStringFromLongLong` + + RedisModuleString *RedisModule_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll); + +Like `RedisModule_CreatString()`, but creates a string starting from a long long +integer instead of taking a buffer and its length. + +The returned string must be released with `RedisModule_FreeString()` or by +enabling automatic memory management. + +## `RedisModule_CreateStringFromString` + + RedisModuleString *RedisModule_CreateStringFromString(RedisModuleCtx *ctx, const RedisModuleString *str); + +Like `RedisModule_CreatString()`, but creates a string starting from another +RedisModuleString. + +The returned string must be released with `RedisModule_FreeString()` or by +enabling automatic memory management. 
+ +## `RedisModule_FreeString` + + void RedisModule_FreeString(RedisModuleCtx *ctx, RedisModuleString *str); + +Free a module string object obtained with one of the Redis modules API calls +that return new string objects. + +It is possible to call this function even when automatic memory management +is enabled. In that case the string will be released ASAP and removed +from the pool of string to release at the end. + +## `RedisModule_RetainString` + + void RedisModule_RetainString(RedisModuleCtx *ctx, RedisModuleString *str); + +Every call to this function, will make the string 'str' requiring +an additional call to `RedisModule_FreeString()` in order to really +free the string. Note that the automatic freeing of the string obtained +enabling modules automatic memory management counts for one +`RedisModule_FreeString()` call (it is just executed automatically). + +Normally you want to call this function when, at the same time +the following conditions are true: + +1) You have automatic memory management enabled. +2) You want to create string objects. +3) Those string objects you create need to live *after* the callback + function(for example a command implementation) creating them returns. + +Usually you want this in order to store the created string object +into your own data structure, for example when implementing a new data +type. + +Note that when memory management is turned off, you don't need +any call to RetainString() since creating a string will always result +into a string that lives after the callback function returns, if +no FreeString() call is performed. + +## `RedisModule_StringPtrLen` + + const char *RedisModule_StringPtrLen(const RedisModuleString *str, size_t *len); + +Given a string module object, this function returns the string pointer +and length of the string. The returned pointer and length should only +be used for read only accesses and never modified. 
+ +## `RedisModule_StringToLongLong` + + int RedisModule_StringToLongLong(const RedisModuleString *str, long long *ll); + +Convert the string into a long long integer, storing it at `*ll`. +Returns `REDISMODULE_OK` on success. If the string can't be parsed +as a valid, strict long long (no spaces before/after), `REDISMODULE_ERR` +is returned. + +## `RedisModule_StringToDouble` + + int RedisModule_StringToDouble(const RedisModuleString *str, double *d); + +Convert the string into a double, storing it at `*d`. +Returns `REDISMODULE_OK` on success or `REDISMODULE_ERR` if the string is +not a valid string representation of a double value. + +## `RedisModule_StringCompare` + + int RedisModule_StringCompare(RedisModuleString *a, RedisModuleString *b); + +Compare two string objects, returning -1, 0 or 1 respectively if +a < b, a == b, a > b. Strings are compared byte by byte as two +binary blobs without any encoding care / collation attempt. + +## `RedisModule_StringAppendBuffer` + + int RedisModule_StringAppendBuffer(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len); + +Append the specified buffere to the string 'str'. The string must be a +string created by the user that is referenced only a single time, otherwise +`REDISMODULE_ERR` is returend and the operation is not performed. + +## `RedisModule_WrongArity` + + int RedisModule_WrongArity(RedisModuleCtx *ctx); + +Send an error about the number of arguments given to the command, +citing the command name in the error message. + +Example: + + if (argc != 3) return RedisModule_WrongArity(ctx); + +## `RedisModule_ReplyWithLongLong` + + int RedisModule_ReplyWithLongLong(RedisModuleCtx *ctx, long long ll); + +Send an integer reply to the client, with the specified long long value. +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplyWithError` + + int RedisModule_ReplyWithError(RedisModuleCtx *ctx, const char *err); + +Reply with the error 'err'. 
+ +Note that 'err' must contain all the error, including +the initial error code. The function only provides the initial "-", so +the usage is, for example: + + RedisModule_ReplyWithError(ctx,"ERR Wrong Type"); + +and not just: + + RedisModule_ReplyWithError(ctx,"Wrong Type"); + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplyWithSimpleString` + + int RedisModule_ReplyWithSimpleString(RedisModuleCtx *ctx, const char *msg); + +Reply with a simple string (+... \r\n in RESP protocol). This replies +are suitable only when sending a small non-binary string with small +overhead, like "OK" or similar replies. + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplyWithArray` + + int RedisModule_ReplyWithArray(RedisModuleCtx *ctx, long len); + +Reply with an array type of 'len' elements. However 'len' other calls +to `ReplyWith*` style functions must follow in order to emit the elements +of the array. + +When producing arrays with a number of element that is not known beforehand +the function can be called with the special count +`REDISMODULE_POSTPONED_ARRAY_LEN`, and the actual number of elements can be +later set with `RedisModule_ReplySetArrayLength()` (which will set the +latest "open" count if there are multiple ones). + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplySetArrayLength` + + void RedisModule_ReplySetArrayLength(RedisModuleCtx *ctx, long len); + +When `RedisModule_ReplyWithArray()` is used with the argument +`REDISMODULE_POSTPONED_ARRAY_LEN`, because we don't know beforehand the number +of items we are going to output as elements of the array, this function +will take care to set the array length. + +Since it is possible to have multiple array replies pending with unknown +length, this function guarantees to always set the latest array length +that was created in a postponed way. 
+ +For example in order to output an array like [1,[10,20,30]] we +could write: + + RedisModule_ReplyWithArray(ctx,REDISMODULE_POSTPONED_ARRAY_LEN); + RedisModule_ReplyWithLongLong(ctx,1); + RedisModule_ReplyWithArray(ctx,REDISMODULE_POSTPONED_ARRAY_LEN); + RedisModule_ReplyWithLongLong(ctx,10); + RedisModule_ReplyWithLongLong(ctx,20); + RedisModule_ReplyWithLongLong(ctx,30); + RedisModule_ReplySetArrayLength(ctx,3); // Set len of 10,20,30 array. + RedisModule_ReplySetArrayLength(ctx,2); // Set len of top array + +Note that in the above example there is no reason to postpone the array +length, since we produce a fixed number of elements, but in the practice +the code may use an interator or other ways of creating the output so +that is not easy to calculate in advance the number of elements. + +## `RedisModule_ReplyWithStringBuffer` + + int RedisModule_ReplyWithStringBuffer(RedisModuleCtx *ctx, const char *buf, size_t len); + +Reply with a bulk string, taking in input a C buffer pointer and length. + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplyWithString` + + int RedisModule_ReplyWithString(RedisModuleCtx *ctx, RedisModuleString *str); + +Reply with a bulk string, taking in input a RedisModuleString object. + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplyWithNull` + + int RedisModule_ReplyWithNull(RedisModuleCtx *ctx); + +Reply to the client with a NULL. In the RESP protocol a NULL is encoded +as the string "$-1\r\n". + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_ReplyWithCallReply` + + int RedisModule_ReplyWithCallReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply); + +Reply exactly what a Redis command returned us with `RedisModule_Call()`. +This function is useful when we use `RedisModule_Call()` in order to +execute some command, as we want to reply to the client exactly the +same reply we obtained by the command. + +The function always returns `REDISMODULE_OK`. 
+ +## `RedisModule_ReplyWithDouble` + + int RedisModule_ReplyWithDouble(RedisModuleCtx *ctx, double d); + +Send a string reply obtained converting the double 'd' into a bulk string. +This function is basically equivalent to converting a double into +a string into a C buffer, and then calling the function +`RedisModule_ReplyWithStringBuffer()` with the buffer and length. + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_Replicate` + + int RedisModule_Replicate(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); + +Replicate the specified command and arguments to slaves and AOF, as effect +of execution of the calling command implementation. + +The replicated commands are always wrapped into the MULTI/EXEC that +contains all the commands replicated in a given module command +execution. However the commands replicated with `RedisModule_Call()` +are the first items, the ones replicated with `RedisModule_Replicate()` +will all follow before the EXEC. + +Modules should try to use one interface or the other. + +This command follows exactly the same interface of `RedisModule_Call()`, +so a set of format specifiers must be passed, followed by arguments +matching the provided format specifiers. + +Please refer to `RedisModule_Call()` for more information. + +The command returns `REDISMODULE_ERR` if the format specifiers are invalid +or the command name does not belong to a known command. + +## `RedisModule_ReplicateVerbatim` + + int RedisModule_ReplicateVerbatim(RedisModuleCtx *ctx); + +This function will replicate the command exactly as it was invoked +by the client. Note that this function will not wrap the command into +a MULTI/EXEC stanza, so it should not be mixed with other replication +commands. 
+ +Basically this form of replication is useful when you want to propagate +the command to the slaves and AOF file exactly as it was called, since +the command can just be re-executed to deterministically re-create the +new state starting from the old one. + +The function always returns `REDISMODULE_OK`. + +## `RedisModule_GetClientId` + + unsigned long long RedisModule_GetClientId(RedisModuleCtx *ctx); + +Return the ID of the current client calling the currently active module +command. The returned ID has a few guarantees: + +1. The ID is different for each different client, so if the same client + executes a module command multiple times, it can be recognized as + having the same ID, otherwise the ID will be different. +2. The ID increases monotonically. Clients connecting to the server later + are guaranteed to get IDs greater than any past ID previously seen. + +Valid IDs are from 1 to 2^64-1. If 0 is returned it means there is no way +to fetch the ID in the context the function was currently called. + +## `RedisModule_GetSelectedDb` + + int RedisModule_GetSelectedDb(RedisModuleCtx *ctx); + +Return the currently selected DB. + +## `RedisModule_SelectDb` + + int RedisModule_SelectDb(RedisModuleCtx *ctx, int newid); + +Change the currently selected DB. Returns an error if the id +is out of range. + +Note that the client will retain the currently selected DB even after +the Redis command implemented by the module calling this function +returns. + +If the module command wishes to change something in a different DB and +returns back to the original one, it should call `RedisModule_GetSelectedDb()` +before in order to restore the old DB number before returning. + +## `RedisModule_OpenKey` + + void *RedisModule_OpenKey(RedisModuleCtx *ctx, robj *keyname, int mode); + +Return an handle representing a Redis key, so that it is possible +to call other APIs with the key handle as argument to perform +operations on the key. 
+
+The return value is the handle representing the key, that must be
+closed with `RM_CloseKey()`.
+
+If the key does not exist and WRITE mode is requested, the handle
+is still returned, since it is possible to perform operations on
+a yet not existing key (that will be created, for example, after
+a list push operation). If the mode is just READ instead, and the
+key does not exist, NULL is returned. However it is still safe to
+call `RedisModule_CloseKey()` and `RedisModule_KeyType()` on a NULL
+value.
+
+## `RedisModule_CloseKey`
+
+    void RedisModule_CloseKey(RedisModuleKey *key);
+
+Close a key handle.
+
+## `RedisModule_KeyType`
+
+    int RedisModule_KeyType(RedisModuleKey *key);
+
+Return the type of the key. If the key pointer is NULL then
+`REDISMODULE_KEYTYPE_EMPTY` is returned.
+
+## `RedisModule_ValueLength`
+
+    size_t RedisModule_ValueLength(RedisModuleKey *key);
+
+Return the length of the value associated with the key.
+For strings this is the length of the string. For all the other types
+is the number of elements (just counting keys for hashes).
+
+If the key pointer is NULL or the key is empty, zero is returned.
+
+## `RedisModule_DeleteKey`
+
+    int RedisModule_DeleteKey(RedisModuleKey *key);
+
+If the key is open for writing, remove it, and setup the key to
+accept new writes as an empty key (that will be created on demand).
+On success `REDISMODULE_OK` is returned. If the key is not open for
+writing `REDISMODULE_ERR` is returned.
+
+## `RedisModule_GetExpire`
+
+    mstime_t RedisModule_GetExpire(RedisModuleKey *key);
+
+Return the key expire value, as milliseconds of remaining TTL.
+If no TTL is associated with the key or if the key is empty,
+`REDISMODULE_NO_EXPIRE` is returned.
+
+## `RedisModule_SetExpire`
+
+    int RedisModule_SetExpire(RedisModuleKey *key, mstime_t expire);
+
+Set a new expire for the key. If the special expire
+`REDISMODULE_NO_EXPIRE` is set, the expire is cancelled if there was
+one (the same as the PERSIST command).
+ +Note that the expire must be provided as a positive integer representing +the number of milliseconds of TTL the key should have. + +The function returns `REDISMODULE_OK` on success or `REDISMODULE_ERR` if +the key was not open for writing or is an empty key. + +## `RedisModule_StringSet` + + int RedisModule_StringSet(RedisModuleKey *key, RedisModuleString *str); + +If the key is open for writing, set the specified string 'str' as the +value of the key, deleting the old value if any. +On success `REDISMODULE_OK` is returned. If the key is not open for +writing or there is an active iterator, `REDISMODULE_ERR` is returned. + +## `RedisModule_StringDMA` + + char *RedisModule_StringDMA(RedisModuleKey *key, size_t *len, int mode); + +Prepare the key associated string value for DMA access, and returns +a pointer and size (by reference), that the user can use to read or +modify the string in-place accessing it directly via pointer. + +The 'mode' is composed by bitwise OR-ing the following flags: + + REDISMODULE_READ -- Read access + REDISMODULE_WRITE -- Write access + +If the DMA is not requested for writing, the pointer returned should +only be accessed in a read-only fashion. + +On error (wrong type) NULL is returned. + +DMA access rules: + +1. No other key writing function should be called since the moment +the pointer is obtained, for all the time we want to use DMA access +to read or modify the string. + +2. Each time `RM_StringTruncate()` is called, to continue with the DMA +access, `RM_StringDMA()` should be called again to re-obtain +a new pointer and length. + +3. If the returned pointer is not NULL, but the length is zero, no +byte can be touched (the string is empty, or the key itself is empty) +so a `RM_StringTruncate()` call should be used if there is to enlarge +the string, and later call StringDMA() again to get the pointer. 
+
+## `RedisModule_StringTruncate`
+
+    int RedisModule_StringTruncate(RedisModuleKey *key, size_t newlen);
+
+If the string is open for writing and is of string type, resize it, padding
+with zero bytes if the new length is greater than the old one.
+
+After this call, `RM_StringDMA()` must be called again to continue
+DMA access with the new pointer.
+
+The function returns `REDISMODULE_OK` on success, and `REDISMODULE_ERR` on
+error, that is, the key is not open for writing, is not a string
+or resizing for more than 512 MB is requested.
+
+If the key is empty, a string key is created with the new string value
+unless the new length value requested is zero.
+
+## `RedisModule_ListPush`
+
+    int RedisModule_ListPush(RedisModuleKey *key, int where, RedisModuleString *ele);
+
+Push an element into a list, on head or tail depending on 'where' argument.
+If the key pointer is about an empty key opened for writing, the key
+is created. On error (key opened for read-only operations or of the wrong
+type) `REDISMODULE_ERR` is returned, otherwise `REDISMODULE_OK` is returned.
+
+## `RedisModule_ListPop`
+
+    RedisModuleString *RedisModule_ListPop(RedisModuleKey *key, int where);
+
+Pop an element from the list, and returns it as a module string object
+that the user should free with `RM_FreeString()` or by enabling
+automatic memory. 'where' specifies if the element should be popped from
+head or tail. The command returns NULL if:
+1) The list is empty.
+2) The key was not open for writing.
+3) The key is not a list.
+
+## `RedisModule_ZsetAddFlagsToCoreFlags`
+
+    int RedisModule_ZsetAddFlagsToCoreFlags(int flags);
+
+Conversion from/to public flags of the Modules API and our private flags,
+so that we have everything decoupled.
+
+## `RedisModule_ZsetAddFlagsFromCoreFlags`
+
+    int RedisModule_ZsetAddFlagsFromCoreFlags(int flags);
+
+See previous function comment.
+
+## `RedisModule_ZsetAdd`
+
+    int RedisModule_ZsetAdd(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr);
+
+Add a new element into a sorted set, with the specified 'score'.
+If the element already exists, the score is updated.
+
+A new sorted set is created at value if the key is an empty open key
+setup for writing.
+
+Additional flags can be passed to the function via a pointer, the flags
+are both used to receive input and to communicate state when the function
+returns. 'flagsptr' can be NULL if no special flags are used.
+
+The input flags are:
+
+    REDISMODULE_ZADD_XX: Element must already exist. Do nothing otherwise.
+    REDISMODULE_ZADD_NX: Element must not exist. Do nothing otherwise.
+
+The output flags are:
+
+    REDISMODULE_ZADD_ADDED: The new element was added to the sorted set.
+    REDISMODULE_ZADD_UPDATED: The score of the element was updated.
+    REDISMODULE_ZADD_NOP: No operation was performed because XX or NX flags.
+
+On success the function returns `REDISMODULE_OK`. On the following errors
+`REDISMODULE_ERR` is returned:
+
+* The key was not opened for writing.
+* The key is of the wrong type.
+* 'score' double value is not a number (NaN).
+
+## `RedisModule_ZsetIncrby`
+
+    int RedisModule_ZsetIncrby(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore);
+
+This function works exactly like `RM_ZsetAdd()`, but instead of setting
+a new score, the score of the existing element is incremented, or if the
+element does not already exist, it is added assuming the old score was
+zero.
+
+The input and output flags, and the return value, have the same exact
+meaning, with the only difference that this function will return
+`REDISMODULE_ERR` even when 'score' is a valid double number, but adding it
+to the existing score results into a NaN (not a number) condition.
+ +This function has an additional field 'newscore', if not NULL is filled +with the new score of the element after the increment, if no error +is returned. + +## `RedisModule_ZsetRem` + + int RedisModule_ZsetRem(RedisModuleKey *key, RedisModuleString *ele, int *deleted); + +Remove the specified element from the sorted set. +The function returns `REDISMODULE_OK` on success, and `REDISMODULE_ERR` +on one of the following conditions: + +* The key was not opened for writing. +* The key is of the wrong type. + +The return value does NOT indicate the fact the element was really +removed (since it existed) or not, just if the function was executed +with success. + +In order to know if the element was removed, the additional argument +'deleted' must be passed, that populates the integer by reference +setting it to 1 or 0 depending on the outcome of the operation. +The 'deleted' argument can be NULL if the caller is not interested +to know if the element was really removed. + +Empty keys will be handled correctly by doing nothing. + +## `RedisModule_ZsetScore` + + int RedisModule_ZsetScore(RedisModuleKey *key, RedisModuleString *ele, double *score); + +On success retrieve the double score associated at the sorted set element +'ele' and returns `REDISMODULE_OK`. Otherwise `REDISMODULE_ERR` is returned +to signal one of the following conditions: + +* There is no such element 'ele' in the sorted set. +* The key is not a sorted set. +* The key is an open empty key. + +## `RedisModule_ZsetRangeStop` + + void RedisModule_ZsetRangeStop(RedisModuleKey *key); + +Stop a sorted set iteration. + +## `RedisModule_ZsetRangeEndReached` + + int RedisModule_ZsetRangeEndReached(RedisModuleKey *key); + +Return the "End of range" flag value to signal the end of the iteration. 
+ +## `RedisModule_ZsetFirstInScoreRange` + + int RedisModule_ZsetFirstInScoreRange(RedisModuleKey *key, double min, double max, int minex, int maxex); + +Setup a sorted set iterator seeking the first element in the specified +range. Returns `REDISMODULE_OK` if the iterator was correctly initialized +otherwise `REDISMODULE_ERR` is returned in the following conditions: + +1. The value stored at key is not a sorted set or the key is empty. + +The range is specified according to the two double values 'min' and 'max'. +Both can be infinite using the following two macros: + +`REDISMODULE_POSITIVE_INFINITE` for positive infinite value +`REDISMODULE_NEGATIVE_INFINITE` for negative infinite value + +'minex' and 'maxex' parameters, if true, respectively setup a range +where the min and max value are exclusive (not included) instead of +inclusive. + +## `RedisModule_ZsetLastInScoreRange` + + int RedisModule_ZsetLastInScoreRange(RedisModuleKey *key, double min, double max, int minex, int maxex); + +Exactly like `RedisModule_ZsetFirstInScoreRange()` but the last element of +the range is selected for the start of the iteration instead. + +## `RedisModule_ZsetFirstInLexRange` + + int RedisModule_ZsetFirstInLexRange(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); + +Setup a sorted set iterator seeking the first element in the specified +lexicographical range. Returns `REDISMODULE_OK` if the iterator was correctly +initialized otherwise `REDISMODULE_ERR` is returned in the +following conditions: + +1. The value stored at key is not a sorted set or the key is empty. +2. The lexicographical range 'min' and 'max' format is invalid. + +'min' and 'max' should be provided as two RedisModuleString objects +in the same format as the parameters passed to the ZRANGEBYLEX command. +The function does not take ownership of the objects, so they can be released +ASAP after the iterator is setup. 
+ +## `RedisModule_ZsetLastInLexRange` + + int RedisModule_ZsetLastInLexRange(RedisModuleKey *key, RedisModuleString *min, RedisModuleString *max); + +Exactly like `RedisModule_ZsetFirstInLexRange()` but the last element of +the range is selected for the start of the iteration instead. + +## `RedisModule_ZsetRangeCurrentElement` + + RedisModuleString *RedisModule_ZsetRangeCurrentElement(RedisModuleKey *key, double *score); + +Return the current sorted set element of an active sorted set iterator +or NULL if the range specified in the iterator does not include any +element. + +## `RedisModule_ZsetRangeNext` + + int RedisModule_ZsetRangeNext(RedisModuleKey *key); + +Go to the next element of the sorted set iterator. Returns 1 if there was +a next element, 0 if we are already at the latest element or the range +does not include any item at all. + +## `RedisModule_ZsetRangePrev` + + int RedisModule_ZsetRangePrev(RedisModuleKey *key); + +Go to the previous element of the sorted set iterator. Returns 1 if there was +a previous element, 0 if we are already at the first element or the range +does not include any item at all. + +## `RedisModule_HashSet` + + int RedisModule_HashSet(RedisModuleKey *key, int flags, ...); + +Set the field of the specified hash field to the specified value. +If the key is an empty key open for writing, it is created with an empty +hash value, in order to set the specified field. + +The function is variadic and the user must specify pairs of field +names and values, both as RedisModuleString pointers (unless the +CFIELD option is set, see later). 
+
+Example to set the hash argv[1] to the value argv[2]:
+
+    RedisModule_HashSet(key,REDISMODULE_HASH_NONE,argv[1],argv[2],NULL);
+
+The function can also be used in order to delete fields (if they exist)
+by setting them to the specified value of `REDISMODULE_HASH_DELETE`:
+
+    RedisModule_HashSet(key,REDISMODULE_HASH_NONE,argv[1],
+                        REDISMODULE_HASH_DELETE,NULL);
+
+The behavior of the command changes with the specified flags, that can be
+set to `REDISMODULE_HASH_NONE` if no special behavior is needed.
+
+    REDISMODULE_HASH_NX: The operation is performed only if the field was not
+                         already existing in the hash.
+    REDISMODULE_HASH_XX: The operation is performed only if the field was
+                         already existing, so that a new value could be
+                         associated to an existing field, but no new fields
+                         are created.
+    REDISMODULE_HASH_CFIELDS: The field names passed are null terminated C
+                              strings instead of RedisModuleString objects.
+
+Unless NX is specified, the command overwrites the old field value with
+the new one.
+
+When using `REDISMODULE_HASH_CFIELDS`, field names are reported using
+normal C strings, so for example to delete the field "foo" the following
+code can be used:
+
+    RedisModule_HashSet(key,REDISMODULE_HASH_CFIELDS,"foo",
+                        REDISMODULE_HASH_DELETE,NULL);
+
+Return value:
+
+The number of fields updated (that may be less than the number of fields
+specified because of the XX or NX options).
+
+In the following case the return value is always zero:
+
+* The key was not open for writing.
+* The key was associated with a non Hash value.
+
+## `RedisModule_HashGet`
+
+    int RedisModule_HashGet(RedisModuleKey *key, int flags, ...);
+
+Get fields from an hash value. This function is called using a variable
+number of arguments, alternating a field name (as a StringRedisModule
+pointer) with a pointer to a StringRedisModule pointer, that is set to the
+value of the field if the field exists, or NULL if the field did not exist.
+At the end of the field/value-ptr pairs, NULL must be specified as last
+argument to signal the end of the arguments in the variadic function.
+
+This is an example usage:
+
+    RedisModuleString *first, *second;
+    RedisModule_HashGet(mykey,REDISMODULE_HASH_NONE,argv[1],&first,
+                        argv[2],&second,NULL);
+
+As with `RedisModule_HashSet()` the behavior of the command can be specified
+passing flags different than `REDISMODULE_HASH_NONE`:
+
+`REDISMODULE_HASH_CFIELD`: field names as null terminated C strings.
+
+`REDISMODULE_HASH_EXISTS`: instead of setting the value of the field
+expecting a RedisModuleString pointer to pointer, the function just
+reports if the field exists or not and expects an integer pointer
+as the second element of each pair.
+
+Example of `REDISMODULE_HASH_CFIELD`:
+
+    RedisModuleString *username, *hashedpass;
+    RedisModule_HashGet(mykey,"username",&username,"hp",&hashedpass, NULL);
+
+Example of `REDISMODULE_HASH_EXISTS`:
+
+    int exists;
+    RedisModule_HashGet(mykey,argv[1],&exists,NULL);
+
+The function returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` if
+the key is not an hash value.
+
+Memory management:
+
+The returned RedisModuleString objects should be released with
+`RedisModule_FreeString()`, or by enabling automatic memory management.
+
+## `RedisModule_FreeCallReply_Rec`
+
+    void RedisModule_FreeCallReply_Rec(RedisModuleCallReply *reply, int freenested);
+
+Free a Call reply and all the nested replies it contains if it's an
+array.
+
+## `RedisModule_FreeCallReply`
+
+    void RedisModule_FreeCallReply(RedisModuleCallReply *reply);
+
+Wrapper for the recursive free reply function. This is needed in order
+to have the first level function to return on nested replies, but only
+if called by the module API.
+
+## `RedisModule_CallReplyType`
+
+    int RedisModule_CallReplyType(RedisModuleCallReply *reply);
+
+Return the reply type.
+ +## `RedisModule_CallReplyLength` + + size_t RedisModule_CallReplyLength(RedisModuleCallReply *reply); + +Return the reply type length, where applicable. + +## `RedisModule_CallReplyArrayElement` + + RedisModuleCallReply *RedisModule_CallReplyArrayElement(RedisModuleCallReply *reply, size_t idx); + +Return the 'idx'-th nested call reply element of an array reply, or NULL +if the reply type is wrong or the index is out of range. + +## `RedisModule_CallReplyInteger` + + long long RedisModule_CallReplyInteger(RedisModuleCallReply *reply); + +Return the long long of an integer reply. + +## `RedisModule_CallReplyStringPtr` + + const char *RedisModule_CallReplyStringPtr(RedisModuleCallReply *reply, size_t *len); + +Return the pointer and length of a string or error reply. + +## `RedisModule_CreateStringFromCallReply` + + RedisModuleString *RedisModule_CreateStringFromCallReply(RedisModuleCallReply *reply); + +Return a new string object from a call reply of type string, error or +integer. Otherwise (wrong reply type) return NULL. + +## `RedisModule_Call` + + RedisModuleCallReply *RedisModule_Call(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...); + +Exported API to call any Redis command from modules. +On success a RedisModuleCallReply object is returned, otherwise +NULL is returned and errno is set to the following values: + +EINVAL: command non existing, wrong arity, wrong format specifier. +EPERM: operation in Cluster instance with key in non local slot. + +## `RedisModule_CallReplyProto` + + const char *RedisModule_CallReplyProto(RedisModuleCallReply *reply, size_t *len); + +Return a pointer, and a length, to the protocol returned by the command +that returned the reply object. + +## `RedisModule_CreateDataType` + + moduleType *RedisModule_CreateDataType(RedisModuleCtx *ctx, const char *name, int encver, void *typemethods_ptr); + +Register a new data type exported by the module. The parameters are the +following. 
Please for in depth documentation check the modules API +documentation, especially the TYPES.md file. + +* **name**: A 9 characters data type name that MUST be unique in the Redis + Modules ecosystem. Be creative... and there will be no collisions. Use + the charset A-Z a-z 9-0, plus the two "-_" characters. A good + idea is to use, for example `-`. For example + "tree-AntZ" may mean "Tree data structure by @antirez". To use both + lower case and upper case letters helps in order to prevent collisions. +* **encver**: Encoding version, which is, the version of the serialization + that a module used in order to persist data. As long as the "name" + matches, the RDB loading will be dispatched to the type callbacks + whatever 'encver' is used, however the module can understand if + the encoding it must load are of an older version of the module. + For example the module "tree-AntZ" initially used encver=0. Later + after an upgrade, it started to serialize data in a different format + and to register the type with encver=1. However this module may + still load old data produced by an older version if the rdb_load + callback is able to check the encver value and act accordingly. + The encver must be a positive value between 0 and 1023. +* **typemethods_ptr** is a pointer to a RedisModuleTypeMethods structure + that should be populated with the methods callbacks and structure + version, like in the following example: + + RedisModuleTypeMethods tm = { + .version = REDISMODULE_TYPE_METHOD_VERSION, + .rdb_load = myType_RDBLoadCallBack, + .rdb_save = myType_RDBSaveCallBack, + .aof_rewrite = myType_AOFRewriteCallBack, + .free = myType_FreeCallBack, + + // Optional fields + .digest = myType_DigestCallBack, + .mem_usage = myType_MemUsageCallBack, + } + +* **rdb_load**: A callback function pointer that loads data from RDB files. +* **rdb_save**: A callback function pointer that saves data to RDB files. 
+
+* **aof_rewrite**: A callback function pointer that rewrites data as commands.
+* **digest**: A callback function pointer that is used for `DEBUG DIGEST`.
+* **free**: A callback function pointer that can free a type value.
+
+The **digest** and **mem_usage** methods should currently be omitted since
+they are not yet implemented inside the Redis modules core.
+
+Note: the module name "AAAAAAAAA" is reserved and produces an error, it
+happens to be pretty lame as well.
+
+If there is already a module registering a type with the same name,
+and if the module name or encver is invalid, NULL is returned.
+Otherwise the new type is registered into Redis, and a reference of
+type RedisModuleType is returned: the caller of the function should store
+this reference into a global variable to make future use of it in the
+modules type API, since a single module may register multiple types.
+Example code fragment:
+
+    static RedisModuleType *BalancedTreeType;
+
+    int RedisModule_OnLoad(RedisModuleCtx *ctx) {
+        // some code here ...
+        BalancedTreeType = RM_CreateDataType(...);
+    }
+
+## `RedisModule_ModuleTypeSetValue`
+
+    int RedisModule_ModuleTypeSetValue(RedisModuleKey *key, moduleType *mt, void *value);
+
+If the key is open for writing, set the specified module type object
+as the value of the key, deleting the old value if any.
+On success `REDISMODULE_OK` is returned. If the key is not open for
+writing or there is an active iterator, `REDISMODULE_ERR` is returned.
+
+## `RedisModule_ModuleTypeGetType`
+
+    moduleType *RedisModule_ModuleTypeGetType(RedisModuleKey *key);
+
+Assuming `RedisModule_KeyType()` returned `REDISMODULE_KEYTYPE_MODULE` on
+the key, returns the module type pointer of the value stored at key.
+
+If the key is NULL, is not associated with a module type, or is empty,
+then NULL is returned instead.
+ +## `RedisModule_ModuleTypeGetValue` + + void *RedisModule_ModuleTypeGetValue(RedisModuleKey *key); + +Assuming `RedisModule_KeyType()` returned `REDISMODULE_KEYTYPE_MODULE` on +the key, returns the module type low-level value stored at key, as +it was set by the user via `RedisModule_ModuleTypeSet()`. + +If the key is NULL, is not associated with a module type, or is empty, +then NULL is returned instead. + +## `RedisModule_SaveUnsigned` + + void RedisModule_SaveUnsigned(RedisModuleIO *io, uint64_t value); + +Save an unsigned 64 bit value into the RDB file. This function should only +be called in the context of the rdb_save method of modules implementing new +data types. + +## `RedisModule_LoadUnsigned` + + uint64_t RedisModule_LoadUnsigned(RedisModuleIO *io); + +Load an unsigned 64 bit value from the RDB file. This function should only +be called in the context of the rdb_load method of modules implementing +new data types. + +## `RedisModule_SaveSigned` + + void RedisModule_SaveSigned(RedisModuleIO *io, int64_t value); + +Like `RedisModule_SaveUnsigned()` but for signed 64 bit values. + +## `RedisModule_LoadSigned` + + int64_t RedisModule_LoadSigned(RedisModuleIO *io); + +Like `RedisModule_LoadUnsigned()` but for signed 64 bit values. + +## `RedisModule_SaveString` + + void RedisModule_SaveString(RedisModuleIO *io, RedisModuleString *s); + +In the context of the rdb_save method of a module type, saves a +string into the RDB file taking as input a RedisModuleString. + +The string can be later loaded with `RedisModule_LoadString()` or +other Load family functions expecting a serialized string inside +the RDB file. + +## `RedisModule_SaveStringBuffer` + + void RedisModule_SaveStringBuffer(RedisModuleIO *io, const char *str, size_t len); + +Like `RedisModule_SaveString()` but takes a raw C pointer and length +as input. 
+
+## `RedisModule_LoadString`
+
+    RedisModuleString *RedisModule_LoadString(RedisModuleIO *io);
+
+In the context of the rdb_load method of a module data type, loads a string
+from the RDB file, that was previously saved with `RedisModule_SaveString()`
+functions family.
+
+The returned string is a newly allocated RedisModuleString object, and
+the user should at some point free it with a call to `RedisModule_FreeString()`.
+
+If the data structure does not store strings as RedisModuleString objects,
+the similar function `RedisModule_LoadStringBuffer()` could be used instead.
+
+## `RedisModule_LoadStringBuffer`
+
+    char *RedisModule_LoadStringBuffer(RedisModuleIO *io, size_t *lenptr);
+
+Like `RedisModule_LoadString()` but returns an heap allocated string that
+was allocated with `RedisModule_Alloc()`, and can be resized or freed with
+`RedisModule_Realloc()` or `RedisModule_Free()`.
+
+The size of the string is stored at '*lenptr' if not NULL.
+The returned string is not automatically NULL terminated, it is loaded
+exactly as it was stored inside the RDB file.
+
+## `RedisModule_SaveDouble`
+
+    void RedisModule_SaveDouble(RedisModuleIO *io, double value);
+
+In the context of the rdb_save method of a module data type, saves a double
+value to the RDB file. The double can be a valid number, a NaN or infinity.
+It is possible to load back the value with `RedisModule_LoadDouble()`.
+
+## `RedisModule_LoadDouble`
+
+    double RedisModule_LoadDouble(RedisModuleIO *io);
+
+In the context of the rdb_save method of a module data type, loads back the
+double value saved by `RedisModule_SaveDouble()`.
+
+## `RedisModule_SaveFloat`
+
+    void RedisModule_SaveFloat(RedisModuleIO *io, float value);
+
+In the context of the rdb_save method of a module data type, saves a float
+value to the RDB file. The float can be a valid number, a NaN or infinity.
+It is possible to load back the value with `RedisModule_LoadFloat()`.
+
+## `RedisModule_LoadFloat`
+
+    float RedisModule_LoadFloat(RedisModuleIO *io);
+
+In the context of the rdb_save method of a module data type, loads back the
+float value saved by `RedisModule_SaveFloat()`.
+
+## `RedisModule_DigestAddStringBuffer`
+
+    void RedisModule_DigestAddStringBuffer(RedisModuleDigest *md, unsigned char *ele, size_t len);
+
+Add a new element to the digest. This function can be called multiple times
+one element after the other, for all the elements that constitute a given
+data structure. The function call must be followed by the call to
+``RedisModule_DigestEndSequence`` eventually, when all the elements that are
+always in a given order are added. See the Redis Modules data types
+documentation for more info. However this is a quick example that uses Redis
+data types as an example.
+
+To add a sequence of unordered elements (for example in the case of a Redis
+Set), the pattern to use is:
+
+    foreach element {
+        AddElement(element);
+        EndSequence();
+    }
+
+Because Sets are not ordered, so every element added has a position that
+does not depend from the other. However if instead our elements are
+ordered in pairs, like field-value pairs of an Hash, then one should
+use:
+
+    foreach key,value {
+        AddElement(key);
+        AddElement(value);
+        EndSequence();
+    }
+
+Because the key and value will be always in the above order, while instead
+the single key-value pairs, can appear in any position into a Redis hash.
+
+A list of ordered elements would be implemented with:
+
+    foreach element {
+        AddElement(element);
+    }
+    EndSequence();
+
+## `RedisModule_DigestAddLongLong`
+
+    void RedisModule_DigestAddLongLong(RedisModuleDigest *md, long long ll);
+
+Like ``RedisModule_DigestAddStringBuffer()`` but takes a long long as input
+that gets converted into a string before adding it to the digest.
+
+## `RedisModule_DigestEndSequence`
+
+    void RedisModule_DigestEndSequence(RedisModuleDigest *md);
+
+See the documentation for ``RedisModule_DigestAddElement()``.
+
+## `RedisModule_EmitAOF`
+
+    void RedisModule_EmitAOF(RedisModuleIO *io, const char *cmdname, const char *fmt, ...);
+
+Emits a command into the AOF during the AOF rewriting process. This function
+is only called in the context of the aof_rewrite method of data types exported
+by a module. The command works exactly like `RedisModule_Call()` in the way
+the parameters are passed, but it does not return anything as the error
+handling is performed by Redis itself.
+
+## `RedisModule_LogRaw`
+
+    void RedisModule_LogRaw(RedisModule *module, const char *levelstr, const char *fmt, va_list ap);
+
+This is the low level function implementing both:
+
+    RM_Log()
+    RM_LogIOError()
+
+## `RedisModule_Log`
+
+    void RedisModule_Log(RedisModuleCtx *ctx, const char *levelstr, const char *fmt, ...);
+
+Produces a log message to the standard Redis log, the format accepts
+printf-alike specifiers, while level is a string describing the log
+level to use when emitting the log, and must be one of the following:
+
+* "debug"
+* "verbose"
+* "notice"
+* "warning"
+
+If the specified log level is invalid, verbose is used by default.
+There is a fixed limit to the length of the log line this function is able
+to emit, this limit is not specified but is guaranteed to be more than
+a few lines of text.
+
+## `RedisModule_LogIOError`
+
+    void RedisModule_LogIOError(RedisModuleIO *io, const char *levelstr, const char *fmt, ...);
+
+Log errors from RDB / AOF serialization callbacks.
+
+This function should be used when a callback is returning a critical
+error to the caller since it cannot load or save the data for some
+critical reason.
+
+## `RedisModule_BlockClient`
+
+    RedisModuleBlockedClient *RedisModule_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(void*), long long timeout_ms);
+
+Block a client in the context of a blocking command, returning an handle
+which will be used, later, in order to unblock the client with a call to
+`RedisModule_UnblockClient()`. The arguments specify callback functions
+and a timeout after which the client is unblocked.
+
+The callbacks are called in the following contexts:
+
+    reply_callback:  called after a successful RedisModule_UnblockClient()
+                     call in order to reply to the client and unblock it.
+
+    reply_timeout:   called when the timeout is reached in order to send an
+                     error to the client.
+
+    free_privdata:   called in order to free the private data that is passed
+                     by RedisModule_UnblockClient() call.
+
+## `RedisModule_UnblockClient`
+
+    int RedisModule_UnblockClient(RedisModuleBlockedClient *bc, void *privdata);
+
+Unblock a client blocked by ``RedisModule_BlockedClient``. This will trigger
+the reply callbacks to be called in order to reply to the client.
+The 'privdata' argument will be accessible by the reply callback, so
+the caller of this function can pass any value that is needed in order to
+actually reply to the client.
+
+A common usage for 'privdata' is a thread that computes something that
+needs to be passed to the client, included but not limited some slow
+to compute reply or some reply obtained via networking.
+
+Note: this function can be called from threads spawned by the module.
+
+## `RedisModule_AbortBlock`
+
+    int RedisModule_AbortBlock(RedisModuleBlockedClient *bc);
+
+Abort a blocked client blocking operation: the client will be unblocked
+without firing the reply callback.
+
+## `RedisModule_IsBlockedReplyRequest`
+
+    int RedisModule_IsBlockedReplyRequest(RedisModuleCtx *ctx);
+
+Return non-zero if a module command was called in order to fill the
+reply for a blocked client.
+
+## `RedisModule_IsBlockedTimeoutRequest`
+
+    int RedisModule_IsBlockedTimeoutRequest(RedisModuleCtx *ctx);
+
+Return non-zero if a module command was called in order to fill the
+reply for a blocked client that timed out.
+
+## `RedisModule_GetBlockedClientPrivateData`
+
+    void *RedisModule_GetBlockedClientPrivateData(RedisModuleCtx *ctx);
+
+Get the private data set by `RedisModule_UnblockClient()`.
+
+## `RedisModule_GetThreadSafeContext`
+
+    RedisModuleCtx *RedisModule_GetThreadSafeContext(RedisModuleBlockedClient *bc);
+
+Return a context which can be used inside threads to make Redis context
+calls with certain modules APIs. If 'bc' is not NULL then the module will
+be bound to a blocked client, and it will be possible to use the
+``RedisModule_Reply`*` family of functions to accumulate a reply for when the
+client will be unblocked. Otherwise the thread safe context will be
+detached by a specific client.
+
+To call non-reply APIs, the thread safe context must be prepared with:
+
+    RedisModule_ThreadSafeCallStart(ctx);
+    ... make your call here ...
+    RedisModule_ThreadSafeCallStop(ctx);
+
+This is not needed when using ``RedisModule_Reply`*` functions, assuming
+that a blocked client was used when the context was created, otherwise
+no `RedisModule_Reply`* call should be made at all.
+
+TODO: thread safe contexts do not inherit the blocked client
+selected database.
+
+## `RedisModule_FreeThreadSafeContext`
+
+    void RedisModule_FreeThreadSafeContext(RedisModuleCtx *ctx);
+
+Release a thread safe context.
+
+## `RedisModule_ThreadSafeContextLock`
+
+    void RedisModule_ThreadSafeContextLock(RedisModuleCtx *ctx);
+
+Acquire the server lock before executing a thread safe API call.
+This is not needed for ``RedisModule_Reply`*` calls when there is +a blocked client connected to the thread safe context. + +## `RedisModule_ThreadSafeContextUnlock` + + void RedisModule_ThreadSafeContextUnlock(RedisModuleCtx *ctx); + +Release the server lock after a thread safe API call was executed. + diff --git a/topics/modules-blocking-ops.md b/topics/modules-blocking-ops.md new file mode 100644 index 0000000000..d4f3c93bc8 --- /dev/null +++ b/topics/modules-blocking-ops.md @@ -0,0 +1,265 @@ +Blocking commands in Redis modules +=== + +Redis has a few blocking commands among the built-in set of commands. +One of the most used is `BLPOP` (or the symmetric `BRPOP`) which blocks +waiting for elements arriving in a list. + +The interesting fact about blocking commands is that they do not block +the whole server, but just the client calling them. Usually the reason to +block is that we expect some external event to happen: this can be +some change in the Redis data structures like in the `BLPOP` case, a +long computation happening in a thread, to receive some data from the +network, and so forth. + +Redis modules have the ability to implement blocking commands as well, +this documentation shows how the API works and describes a few patterns +that can be used in order to model blocking commands. + +How blocking and resuming works. +--- + +_Note: You may want to check the `helloblock.c` example in the Redis source tree +inside the `src/modules` directory, for a simple to understand example +on how the blocking API is applied._ + +In Redis modules, commands are implemented by callback functions that +are invoked by the Redis core when the specific command is called +by the user. Normally the callback terminates its execution sending +some reply to the client. 
Using the following function instead, the +function implementing the module command may request that the client +is put into the blocked state: + + RedisModuleBlockedClient *RedisModule_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(void*), long long timeout_ms); + +The function returns a `RedisModuleBlockedClient` object, which is later +used in order to unblock the client. The arguments have the following +meaning: + +* `ctx` is the command execution context as usually in the rest of the API. +* `reply_callback` is the callback, having the same prototype of a normal command function, that is called when the client is unblocked in order to return a reply to the client. +* `timeout_callback` is the callback, having the same prototype of a normal command function that is called when the client reached the `ms` timeout. +* `free_privdata` is the callback that is called in order to free the private data. Private data is a pointer to some data that is passed between the API used to unblock the client, to the callback that will send the reply to the client. We'll see how this mechanism works later in this document. +* `ms` is the timeout in milliseconds. When the timeout is reached, the timeout callback is called and the client is automatically aborted. + +Once a client is blocked, it can be unblocked with the following API: + + int RedisModule_UnblockClient(RedisModuleBlockedClient *bc, void *privdata); + +The function takes as argument the blocked client object returned by +the previous call to `RedisModule_BlockClient()`, and unblock the client. +Immediately before the client gets unblocked, the `reply_callback` function +specified when the client was blocked is called: this function will +have access to the `privdata` pointer used here. 
+ +IMPORTANT: The above function is thread safe, and can be called from within +a thread doing some work in order to implement the command that blocked +the client. + +The `privdata` data will be freed automatically using the `free_privdata` +callback when the client is unblocked. This is useful **since the reply +callback may never be called** in case the client times out or disconnects +from the server, so it's important that it's up to an external function +to have the responsibility to free the data passed if needed. + +To better understand how the API works, we can imagine writing a command +that blocks a client for one second, and then send as reply "Hello!". + +Note: arity checks and other non important things are not implemented +in this command, in order to keep the example simple. + +    int Example_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, +                             int argc) + { + RedisModuleBlockedClient *bc = + RedisModule_BlockClient(ctx,reply_func,timeout_func,NULL,0); + + pthread_t tid; + pthread_create(&tid,NULL,threadmain,bc); + + return REDISMODULE_OK; + } + + void *threadmain(void *arg) { + RedisModuleBlockedClient *bc = arg; + + sleep(1); /* Wait one second and unblock. */ + RedisModule_UnblockClient(bc,NULL); + } + +The above command blocks the client ASAP, spawning a thread that will +wait a second and will unblock the client. Let's check the reply and +timeout callbacks, which are in our case very similar, since they +just reply to the client with a different reply type. + + int reply_func(RedisModuleCtx *ctx, RedisModuleString **argv, + int argc) + { + return RedisModule_ReplyWithSimpleString(ctx,"Hello!"); + } + + int timeout_func(RedisModuleCtx *ctx, RedisModuleString **argv, + int argc) + { + return RedisModule_ReplyWithNull(ctx); + } + +The reply callback just sends the "Hello!" string to the client. +The important bit here is that the reply callback is called when the +client is unblocked from the thread. 
+ +The timeout command returns `NULL`, as it often happens with actual +Redis blocking commands timing out. + +Passing reply data when unblocking +--- + +The above example is simple to understand but lacks an important +real world aspect of an actual blocking command implementation: often +the reply function will need to know what to reply to the client, +and this information is often provided as the client is unblocked. + +We could modify the above example so that the thread generates a +random number after waiting one second. You can think of it as an +actually expensive operation of some kind. Then this random number +can be passed to the reply function so that we return it to the command +caller. In order to make this work, we modify the functions as follows: + + void *threadmain(void *arg) { + RedisModuleBlockedClient *bc = arg; + + sleep(1); /* Wait one second and unblock. */ + + long *mynumber = RedisModule_Alloc(sizeof(long)); + *mynumber = rand(); + RedisModule_UnblockClient(bc,mynumber); + } + +As you can see, now the unblocking call is passing some private data, +that is the `mynumber` pointer, to the reply callback. In order to +obtain this private data, the reply callback will use the following +function: + + void *RedisModule_GetBlockedClientPrivateData(RedisModuleCtx *ctx); + +So our reply callback is modified like that: + + int reply_func(RedisModuleCtx *ctx, RedisModuleString **argv, + int argc) + { + long *mynumber = RedisModule_GetBlockedClientPrivateData(ctx); + /* IMPORTANT: don't free mynumber here, but in the + * free privdata callback. */ + return RedisModule_ReplyWithLongLong(ctx,*mynumber); + } + +Note that we also need to pass a `free_privdata` function when blocking +the client with `RedisModule_BlockClient()`, since the allocated +long value must be freed. 
Our callback will look like the following: + + void free_privdata(void *privdata) { + RedisModule_Free(privdata); + } + +NOTE: It is important to stress that the private data is best freed in the +`free_privdata` callback becaues the reply function may not be called +if the client disconnects or timeout. + +Also note that the private data is also accessible from the timeout +callback, always using the `GetBlockedClientPrivateData()` API. + +Aborting the blocking of a client +--- + +One problem that sometimes arises is that we need to allocate resources +in order to implement the non blocking command. So we block the client, +then, for example, try to create a thread, but the thread creation function +returns an error. What to do in such a condition in order to recover? We +don't want to take the client blocked, nor we want to call `UnblockClient()` +because this will trigger the reply callback to be called. + +In this case the best thing to do is to use the following function: + + int RedisModule_AbortBlock(RedisModuleBlockedClient *bc); + +Practically this is how to use it: + + int Example_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, + int argc) + { + RedisModuleBlockedClient *bc = + RedisModule_BlockClient(ctx,reply_func,timeout_func,NULL,0); + + pthread_t tid; + if (pthread_create(&tid,NULL,threadmain,bc) != 0) { + RedisModule_AbortBlock(bc); + RedisModule_ReplyWithError(ctx,"Sorry can't create a thread"); + } + + return REDISMODULE_OK; + } + +The client will be unblocked but the reply callback will not be called. 
+ +Implementing the command, reply and timeout callback using a single function +--- + +The following functions can be used in order to implement the reply and +callback with the same function that implements the primary command +function: + +    int RedisModule_IsBlockedReplyRequest(RedisModuleCtx *ctx); +    int RedisModule_IsBlockedTimeoutRequest(RedisModuleCtx *ctx); + +So I could rewrite the example command without using a separated +reply and timeout callback: + + int Example_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, + int argc) + { + if (RedisModule_IsBlockedReplyRequest(ctx)) { + long *mynumber = RedisModule_GetBlockedClientPrivateData(ctx); + return RedisModule_ReplyWithLongLong(ctx,*mynumber); + } else if (RedisModule_IsBlockedTimeoutRequest(ctx)) { + return RedisModule_ReplyWithNull(ctx); + } + + RedisModuleBlockedClient *bc = + RedisModule_BlockClient(ctx,reply_func,timeout_func,NULL,0); + + pthread_t tid; + if (pthread_create(&tid,NULL,threadmain,bc) != 0) { + RedisModule_AbortBlock(bc); + RedisModule_ReplyWithError(ctx,"Sorry can't create a thread"); + } + + return REDISMODULE_OK; + } + +Functionally it is the same, but there are people that will prefer the less +verbose implementation that concentrates most of the command logic in a +single function. + +Working on copies of data inside a thread +--- + +An interesting pattern in order to work with threads implementing the +slow part of a command, is to work with a copy of the data, so that +while some operation is performed on a key, the user continues to see +the old version. However when the thread terminates its work, the +representations are swapped and the new, processed version, is used. + +An example of this approach is the +[Neural Redis module](https://github.com/antirez/neural-redis) +where neural networks are trained in different threads while the +user can still execute and inspect their older versions. 
+ +Future work +--- + +An API is work in progress right now in order to allow Redis modules APIs +to be called in a safe way from threads, so that the threaded command +can access the data space and do incremental operations. + +There is no ETA for this feature but it may appear in the course of the +Redis 4.0 release at some point. diff --git a/topics/modules-intro.md b/topics/modules-intro.md new file mode 100644 index 0000000000..3ac6a46732 --- /dev/null +++ b/topics/modules-intro.md @@ -0,0 +1,857 @@ +Redis Modules: an introduction to the API +=== + +The modules documentation is composed of the following files: + +* `INTRO.md` (this file). An overview about Redis Modules system and API. It's a good idea to start your reading here. +* `API.md` is generated from module.c top comments of RedisMoule functions. It is a good reference in order to understand how each function works. +* `TYPES.md` covers the implementation of native data types into modules. +* `BLOCK.md` shows how to write blocking commands that will not reply immediately, but will block the client, without blocking the Redis server, and will provide a reply whenever will be possible. + +Redis modules make possible to extend Redis functionality using external +modules, implementing new Redis commands at a speed and with features +similar to what can be done inside the core itself. + +Redis modules are dynamic libraries, that can be loaded into Redis at +startup or using the `MODULE LOAD` command. Redis exports a C API, in the +form of a single C header file called `redismodule.h`. Modules are meant +to be written in C, however it will be possible to use C++ or other languages +that have C binding functionalities. + +Modules are designed in order to be loaded into different versions of Redis, +so a given module does not need to be designed, or recompiled, in order to +run with a specific version of Redis. For this reason, the module will +register to the Redis core using a specific API version. 
The current API +version is "1". + +This document is about an alpha version of Redis modules. API, functionalities +and other details may change in the future. + +# Loading modules + +In order to test the module you are developing, you can load the module +using the following `redis.conf` configuration directive: + + loadmodule /path/to/mymodule.so + +It is also possible to load a module at runtime using the following command: + + MODULE LOAD /path/to/mymodule.so + +In order to list all loaded modules, use: + + MODULE LIST + +Finally, you can unload (and later reload if you wish) a module using the +following command: + + MODULE UNLOAD mymodule + +Note that `mymodule` above is not the filename without the `.so` suffix, but +instead, the name the module used to register itself into the Redis core. +The name can be obtained using `MODULE LIST`. However it is good practice +that the filename of the dynamic library is the same as the name the module +uses to register itself into the Redis core. + +# The simplest module you can write + +In order to show the different parts of a module, here we'll show a very +simple module that implements a command that outputs a random number. + + #include "redismodule.h" + #include + + int HelloworldRand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + RedisModule_ReplyWithLongLong(ctx,rand()); + return REDISMODULE_OK; + } + + int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + if (RedisModule_Init(ctx,"helloworld",1,REDISMODULE_APIVER_1) + == REDISMODULE_ERR) return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx,"helloworld.rand", + HelloworldRand_RedisCommand) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + return REDISMODULE_OK; + } + +The example module has two functions. One implements a command called +HELLOWORLD.RAND. This function is specific of that module. However the +other function called `RedisModule_OnLoad()` must be present in each +Redis module. 
It is the entry point for the module to be initialized, +register its commands, and potentially other private data structures +it uses. + +Note that it is a good idea for modules to call commands with the +name of the module followed by a dot, and finally the command name, +like in the case of `HELLOWORLD.RAND`. This way it is less likely to +have collisions. + +Note that if different modules have colliding commands, they'll not be +able to work in Redis at the same time, since the function +`RedisModule_CreateCommand` will fail in one of the modules, so the module +loading will abort returning an error condition. + +# Module initialization + +The above example shows the usage of the function `RedisModule_Init()`. +It should be the first function called by the module `OnLoad` function. +The following is the function prototype: + + int RedisModule_Init(RedisModuleCtx *ctx, const char *modulename, + int module_version, int api_version); + +The `Init` function announces the Redis core that the module has a given +name, its version (that is reported by `MODULE LIST`), and that is willing +to use a specific version of the API. + +If the API version is wrong, the name is already taken, or there are other +similar errors, the function will return `REDISMODULE_ERR`, and the module +`OnLoad` function should return ASAP with an error. + +Before the `Init` function is called, no other API function can be called, +otherwise the module will segfault and the Redis instance will crash. + +The second function called, `RedisModule_CreateCommand`, is used in order +to register commands into the Redis core. The following is the prototype: + + int RedisModule_CreateCommand(RedisModuleCtx *ctx, const char *cmdname, + RedisModuleCmdFunc cmdfunc); + +As you can see, most Redis modules API calls all take as first argument +the `context` of the module, so that they have a reference to the module +calling it, to the command and client executing a given command, and so forth. 
+ +To create a new command, the above function needs the context, the command +name, and the function pointer of the function implementing the command, +which must have the following prototype: + + + int mycommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc); + +The command function arguments are just the context, that will be passed +to all the other API calls, the command argument vector, and total number +of arguments, as passed by the user. + +As you can see, the arguments are provided as pointers to a specific data +type, the `RedisModuleString`. This is an opaque data type you have API +functions to access and use, direct access to its fields is never needed. + +Zooming into the example command implementation, we can find another call: + + int RedisModule_ReplyWithLongLong(RedisModuleCtx *ctx, long long integer); + +This function returns an integer to the client that invoked the command, +exactly like other Redis commands do, like for example `INCR` or `SCARD`. + +# Setup and dependencies of a Redis module + +Redis modules don't depend on Redis or some other library, nor they +need to be compiled with a specific `redismodule.h` file. In order +to create a new module, just copy a recent version of `redismodule.h` +in your source tree, link all the libraries you want, and create +a dynamic library having the `RedisModule_OnLoad()` function symbol +exported. + +The module will be able to load into different versions of Redis. + +# Passing configuration parameters to Redis modules + +When the module is loaded with the `MODULE LOAD` command, or using the +`loadmodule` directive in the `redis.conf` file, the user is able to pass +configuration parameters to the module by adding arguments after the module +file name: + + loadmodule mymodule.so foo bar 1234 + +In the above example the strings `foo`, `bar` and `123` will be passed +to the module `OnLoad()` function in the `argv` argument as an array +of RedisModuleString pointers. 
The number of arguments passed is into `argc`. + +The way you can access those strings will be explained in the rest of this +document. Normally the module will store the module configuration parameters +in some `static` global variable that can be accessed module wide, so that +the configuration can change the behavior of different commands. + +# Working with RedisModuleString objects + +The command argument vector `argv` passed to module commands, and the +return value of other module APIs functions, are of type `RedisModuleString`. + +Usually you directly pass module strings to other API calls, however sometimes +you may need to directly access the string object. + +There are a few functions in order to work with string objects: + + const char *RedisModule_StringPtrLen(RedisModuleString *string, size_t *len); + +The above function accesses a string by returning its pointer and setting its +length in `len`. +You should never write to a string object pointer, as you can see from the +`const` pointer qualifier. + +However, if you want, you can create new string objects using the following +API: + + RedisModuleString *RedisModule_CreateString(RedisModuleCtx *ctx, const char *ptr, size_t len); + +The string returned by the above command must be freed using a corresponding +call to `RedisModule_FreeString()`: + + void RedisModule_FreeString(RedisModuleString *str); + +However if you want to avoid having to free strings, the automatic memory +management, covered later in this document, can be a good alternative, by +doing it for you. + +Note that the strings provided via the argument vector `argv` never need +to be freed. You only need to free new strings you create, or new strings +returned by other APIs, where it is specified that the returned string must +be freed. 
+ +## Creating strings from numbers or parsing strings as numbers + +Creating a new string from an integer is a very common operation, so there +is a function to do this: + + RedisModuleString *mystr = RedisModule_CreateStringFromLongLong(ctx,10); + +Similarly in order to parse a string as a number: + + long long myval; + if (RedisModule_StringToLongLong(ctx,argv[1],&myval) == REDISMODULE_OK) { + /* Do something with 'myval' */ + } + +## Accessing Redis keys from modules + +Most Redis modules, in order to be useful, have to interact with the Redis +data space (this is not always true, for example an ID generator may +never touch Redis keys). Redis modules have two different APIs in order to +access the Redis data space, one is a low level API that provides very +fast access and a set of functions to manipulate Redis data structures. +The other API is more high level, and allows to call Redis commands and +fetch the result, similarly to how Lua scripts access Redis. + +The high level API is also useful in order to access Redis functionalities +that are not available as APIs. + +In general modules developers should prefer the low level API, because commands +implemented using the low level API run at a speed comparable to the speed +of native Redis commands. However there are definitely use cases for the +higher level API. For example often the bottleneck could be processing the +data and not accessing it. + +Also note that sometimes using the low level API is not harder compared to +the higher level one. + +# Calling Redis commands + +The high level API to access Redis is the sum of the `RedisModule_Call()` +function, together with the functions needed in order to access the +reply object returned by `Call()`. + +`RedisModule_Call` uses a special calling convention, with a format specifier +that is used to specify what kind of objects you are passing as arguments +to the function. + +Redis commands are invoked just using a command name and a list of arguments. 
+However when calling commands, the arguments may originate from different +kind of strings: null-terminated C strings, RedisModuleString objects as +received from the `argv` parameter in the command implementation, binary +safe C buffers with a pointer and a length, and so forth. + +For example if I want to call `INCRBY` using a first argument (the key) +a string received in the argument vector `argv`, which is an array +of RedisModuleString object pointers, and a C string representing the +number "10" as second argument (the increment), I'll use the following +function call: + + RedisModuleCallReply *reply; + reply = RedisModule_Call(ctx,"INCR","sc",argv[1],"10"); + +The first argument is the context, and the second is always a null terminated +C string with the command name. The third argument is the format specifier +where each character corresponds to the type of the arguments that will follow. +In the above case `"sc"` means a RedisModuleString object, and a null +terminated C string. The other arguments are just the two arguments as +specified. In fact `argv[1]` is a RedisModuleString and `"10"` is a null +terminated C string. + +This is the full list of format specifiers: + +* **c** -- Null terminated C string pointer. +* **b** -- C buffer, two arguments needed: C string pointer and `size_t` length. +* **s** -- RedisModuleString as received in `argv` or by other Redis module APIs returning a RedisModuleString object. +* **l** -- Long long integer. +* **v** -- Array of RedisModuleString objects. +* **!** -- This modifier just tells the function to replicate the command to slaves and AOF. It is ignored from the point of view of arguments parsing. + +The function returns a `RedisModuleCallReply` object on success, on +error NULL is returned. + +NULL is returned when the command name is invalid, the format specifier uses +characters that are not recognized, or when the command is called with the +wrong number of arguments. 
In the above cases the `errno` var is set to `EINVAL`. NULL is also returned when, in an instance with Cluster enabled, the target +keys are about non local hash slots. In this case `errno` is set to `EPERM`. + +## Working with RedisModuleCallReply objects. + +`RedisModuleCall` returns reply objects that can be accessed using the +`RedisModule_CallReply*` family of functions. + +In order to obtain the type or reply (corresponding to one of the data types +supported by the Redis protocol), the function `RedisModule_CallReplyType()` +is used: + + reply = RedisModule_Call(ctx,"INCR","sc",argv[1],"10"); + if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_INTEGER) { + long long myval = RedisModule_CallReplyInteger(reply); + /* Do something with myval. */ + } + +Valid reply types are: + +* `REDISMODULE_REPLY_STRING` Bulk string or status replies. +* `REDISMODULE_REPLY_ERROR` Errors. +* `REDISMODULE_REPLY_INTEGER` Signed 64 bit integers. +* `REDISMODULE_REPLY_ARRAY` Array of replies. +* `REDISMODULE_REPLY_NULL` NULL reply. + +Strings, errors and arrays have an associated length. For strings and errors +the length corresponds to the length of the string. For arrays the length +is the number of elements. To obtain the reply length the following function +is used: + + size_t reply_len = RedisModule_CallReplyLength(reply); + +In order to obtain the value of an integer reply, the following function is used, as already shown in the example above: + + long long reply_integer_val = RedisModule_CallReplyInteger(reply); + +Called with a reply object of the wrong type, the above function always +returns `LLONG_MIN`. + +Sub elements of array replies are accessed this way: + + RedisModuleCallReply *subreply; + subreply = RedisModule_CallReplyArrayElement(reply,idx); + +The above function returns NULL if you try to access out of range elements. 
+ +Strings and errors (which are like strings but with a different type) can +be accessed using in the following way, making sure to never write to +the resulting pointer (that is returned as as `const` pointer so that +misusing must be pretty explicit): + + size_t len; + char *ptr = RedisModule_CallReplyStringPtr(reply,&len); + +If the reply type is not a string or an error, NULL is returned. + +RedisCallReply objects are not the same as module string objects +(RedisModuleString types). However sometimes you may need to pass replies +of type string or integer, to API functions expecting a module string. + +When this is the case, you may want to evaluate if using the low level +API could be a simpler way to implement your command, or you can use +the following function in order to create a new string object from a +call reply of type string, error or integer: + + RedisModuleString *mystr = RedisModule_CreateStringFromCallReply(myreply); + +If the reply is not of the right type, NULL is returned. +The returned string object should be released with `RedisModule_FreeString()` +as usually, or by enabling automatic memory management (see corresponding +section). + +# Releasing call reply objects + +Reply objects must be freed using `RedisModule_FreeCallReply`. For arrays, +you need to free only the top level reply, not the nested replies. +Currently the module implementation provides a protection in order to avoid +crashing if you free a nested reply object for error, however this feature +is not guaranteed to be here forever, so should not be considered part +of the API. + +If you use automatic memory management (explained later in this document) +you don't need to free replies (but you still could if you wish to release +memory ASAP). + +## Returning values from Redis commands + +Like normal Redis commands, new commands implemented via modules must be +able to return values to the caller. 
The API exports a set of functions for +this goal, in order to return the usual types of the Redis protocol, and +arrays of such types as elemented. Also errors can be returned with any +error string and code (the error code is the initial uppercase letters in +the error message, like the "BUSY" string in the "BUSY the sever is busy" error +message). + +All the functions to send a reply to the client are called +`RedisModule_ReplyWith`. + +To return an error, use: + + RedisModule_ReplyWithError(RedisModuleCtx *ctx, const char *err); + +There is a predefined error string for key of wrong type errors: + + REDISMODULE_ERRORMSG_WRONGTYPE + +Example usage: + + RedisModule_ReplyWithError(ctx,"ERR invalid arguments"); + +We already saw how to reply with a long long in the examples above: + + RedisModule_ReplyWithLongLong(ctx,12345); + +To reply with a simple string, that can't contain binary values or newlines, +(so it's suitable to send small words, like "OK") we use: + + RedisModule_ReplyWithSimpleString(ctx,"OK"); + +It's possible to reply with "bulk strings" that are binary safe, using +two different functions: + + int RedisModule_ReplyWithStringBuffer(RedisModuleCtx *ctx, const char *buf, size_t len); + + int RedisModule_ReplyWithString(RedisModuleCtx *ctx, RedisModuleString *str); + +The first function gets a C pointer and length. The second a RedisMoudleString +object. Use one or the other depending on the source type you have at hand. + +In order to reply with an array, you just need to use a function to emit the +array length, followed by as many calls to the above functions as the number +of elements of the array are: + + RedisModule_ReplyWithArray(ctx,2); + RedisModule_ReplyWithStringBuffer(ctx,"age",3); + RedisModule_ReplyWithLongLong(ctx,22); + +To return nested arrays is easy, your nested array element just uses another +call to `RedisModule_ReplyWithArray()` followed by the calls to emit the +sub array elements. 
+ +## Returning arrays with dynamic length + +Sometimes it is not possible to know beforehand the number of items of +an array. As an example, think of a Redis module implementing a FACTOR +command that given a number outputs the prime factors. Instead of +factorializing the number, storing the prime factors into an array, and +later produce the command reply, a better solution is to start an array +reply where the length is not known, and set it later. This is accomplished +with a special argument to `RedisModule_ReplyWithArray()`: + + RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_ARRAY_LEN); + +The above call starts an array reply so we can use other `ReplyWith` calls +in order to produce the array items. Finally in order to set the length +se use the following call: + + RedisModule_ReplySetArrayLength(ctx, number_of_items); + +In the case of the FACTOR command, this translates to some code similar +to this: + + RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_ARRAY_LEN); + number_of_factors = 0; + while(still_factors) { + RedisModule_ReplyWithLongLong(ctx, some_factor); + number_of_factors++; + } + RedisModule_ReplySetArrayLength(ctx, number_of_factors); + +Another common use case for this feature is iterating over the arrays of +some collection and only returning the ones passing some kind of filtering. + +It is possible to have multiple nested arrays with postponed reply. +Each call to `SetArray()` will set the length of the latest corresponding +call to `ReplyWithArray()`: + + RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_ARRAY_LEN); + ... generate 100 elements ... + RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_ARRAY_LEN); + ... generate 10 elements ... + RedisModule_ReplySetArrayLength(ctx, 10); + RedisModule_ReplySetArrayLength(ctx, 100); + +This creates a 100 items array having as last element a 10 items array. 
+ +# Arity and type checks + +Often commands need to check that the number of arguments and type of the key +is correct. In order to report a wrong arity, there is a specific function +called `RedisModule_WrongArity()`. The usage is trivial: + + if (argc != 2) return RedisModule_WrongArity(ctx); + +Checking for the wrong type involves opening the key and checking the type: + + RedisModuleKey *key = RedisModule_OpenKey(ctx,argv[1], + REDISMODULE_READ|REDISMODULE_WRITE); + + int keytype = RedisModule_KeyType(key); + if (keytype != REDISMODULE_KEYTYPE_STRING && + keytype != REDISMODULE_KEYTYPE_EMPTY) + { + RedisModule_CloseKey(key); + return RedisModule_ReplyWithError(ctx,REDISMODULE_ERRORMSG_WRONGTYPE); + } + +Note that you often want to proceed with a command both if the key +is of the expected type, or if it's empty. + +## Low level access to keys + +Low level access to keys allow to perform operations on value objects associated +to keys directly, with a speed similar to what Redis uses internally to +implement the built-in commands. + +Once a key is opened, a key pointer is returned that will be used with all the +other low level API calls in order to perform operations on the key or its +associated value. + +Because the API is meant to be very fast, it cannot do too many run-time +checks, so the user must be aware of certain rules to follow: + +* Opening the same key multiple times where at least one instance is opened for writing, is undefined and may lead to crashes. +* While a key is open, it should only be accessed via the low level key API. For example opening a key, then calling DEL on the same key using the `RedisModule_Call()` API will result into a crash. However it is safe to open a key, perform some operation with the low level API, closing it, then using other APIs to manage the same key, and later opening it again to do some more work. + +In order to open a key the `RedisModule_OpenKey` function is used. 
It returns +a key pointer, that we'll use with all the next calls to access and modify +the value: + + RedisModuleKey *key; + key = RedisModule_OpenKey(ctx,argv[1],REDISMODULE_READ); + +The second argument is the key name, that must be a `RedisModuleString` object. +The third argument is the mode: `REDISMODULE_READ` or `REDISMODULE_WRITE`. +It is possible to use `|` to bitwise OR the two modes to open the key in +both modes. Currently a key opened for writing can also be accessed for reading +but this is to be considered an implementation detail. The right mode should +be used in sane modules. + +You can open non exisitng keys for writing, since the keys will be created +when an attempt to write to the key is performed. However when opening keys +just for reading, `RedisModule_OpenKey` will return NULL if the key does not +exist. + +Once you are done using a key, you can close it with: + + RedisModule_CloseKey(key); + +Note that if automatic memory management is enabled, you are not forced to +close keys. When the module function returns, Redis will take care to close +all the keys which are still open. + +## Getting the key type + +In order to obtain the value of a key, use the `RedisModule_KeyType()` function: + + int keytype = RedisModule_KeyType(key); + +It returns one of the following values: + + REDISMODULE_KEYTYPE_EMPTY + REDISMODULE_KEYTYPE_STRING + REDISMODULE_KEYTYPE_LIST + REDISMODULE_KEYTYPE_HASH + REDISMODULE_KEYTYPE_SET + REDISMODULE_KEYTYPE_ZSET + +The above are just the usual Redis key types, with the addition of an empty +type, that signals the key pointer is associated with an empty key that +does not yet exists. + +## Creating new keys + +To create a new key, open it for writing and then write to it using one +of the key writing functions. 
Example: + + RedisModuleKey *key; + key = RedisModule_OpenKey(ctx,argv[1],REDISMODULE_READ); + if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_EMPTY) { + RedisModule_StringSet(key,argv[2]); + } + +## Deleting keys + +Just use: + + RedisModule_DeleteKey(key); + +The function returns `REDISMODULE_ERR` if the key is not open for writing. +Note that after a key gets deleted, it is setup in order to be targeted +by new key commands. For example `RedisModule_KeyType()` will return it is +an empty key, and writing to it will create a new key, possibly of another +type (depending on the API used). + +## Managing key expires (TTLs) + +To control key expires two functions are provided, that are able to set, +modify, get, and unset the time to live associated with a key. + +One function is used in order to query the current expire of an open key: + + mstime_t RedisModule_GetExpire(RedisModuleKey *key); + +The function returns the time to live of the key in milliseconds, or +`REDISMODULE_NO_EXPIRE` as a special value to signal the key has no associated +expire or does not exist at all (you can differentiate the two cases checking +if the key type is `REDISMODULE_KEYTYPE_EMPTY`). + +In order to change the expire of a key the following function is used instead: + + int RedisModule_SetExpire(RedisModuleKey *key, mstime_t expire); + +When called on a non existing key, `REDISMODULE_ERR` is returned, because +the function can only associate expires to existing open keys (non existing +open keys are only useful in order to create new values with data type +specific write operations). + +Again the `expire` time is specified in milliseconds. If the key has currently +no expire, a new expire is set. If the key already have an expire, it is +replaced with the new value. + +If the key has an expire, and the special value `REDISMODULE_NO_EXPIRE` is +used as a new expire, the expire is removed, similarly to the Redis +`PERSIST` command. 
In case the key was already persistent, no operation is +performed. + +## Obtaining the length of values + +There is a single function in order to retrieve the length of the value +associated to an open key. The returned length is value-specific, and is +the string length for strings, and the number of elements for the aggregated +data types (how many elements there are in a list, set, sorted set, hash). + + size_t len = RedisModule_ValueLength(key); + +If the key does not exist, 0 is returned by the function. + +## String type API + +Setting a new string value, like the Redis `SET` command does, is performed +using: + + int RedisModule_StringSet(RedisModuleKey *key, RedisModuleString *str); + +The function works exactly like the Redis `SET` command itself, that is, if +there is a prior value (of any type) it will be deleted. + +Accessing existing string values is performed using DMA (direct memory +access) for speed. The API will return a pointer and a length, so it's +possible to access and, if needed, modify the string directly. + + size_t len, j; + char *myptr = RedisModule_StringDMA(key,&len,REDISMODULE_WRITE); + for (j = 0; j < len; j++) myptr[j] = 'A'; + +In the above example we write directly on the string. Note that if you want +to write, you must be sure to ask for `WRITE` mode. + +DMA pointers are only valid if no other operations are performed with the key +before using the pointer, after the DMA call. + +Sometimes when we want to manipulate strings directly, we need to change +their size as well. For this scope, the `RedisModule_StringTruncate` function +is used. Example: + + RedisModule_StringTruncate(mykey,1024); + +The function truncates, or enlarges the string as needed, padding it with +zero bytes if the previous length is smaller than the new length we request. +If the string does not exist since `key` is associated to an open empty key, +a string value is created and associated to the key. 
+ +Note that every time `StringTruncate()` is called, we need to re-obtain +the DMA pointer again, since the old may be invalid. + +## List type API + +It's possible to push and pop values from list values: + + int RedisModule_ListPush(RedisModuleKey *key, int where, RedisModuleString *ele); + RedisModuleString *RedisModule_ListPop(RedisModuleKey *key, int where); + +In both the APIs the `where` argument specifies if to push or pop from tail +or head, using the following macros: + + REDISMODULE_LIST_HEAD + REDISMODULE_LIST_TAIL + +Elements returned by `RedisModule_ListPop()` are like strings craeted with +`RedisModule_CreateString()`, they must be released with +`RedisModule_FreeString()` or by enabling automatic memory management. + +## Set type API + +Work in progress. + +## Sorted set type API + +Documentation missing, please refer to the top comments inside `module.c` +for the following functions: + +* `RedisModule_ZsetAdd` +* `RedisModule_ZsetIncrby` +* `RedisModule_ZsetScore` +* `RedisModule_ZsetRem` + +And for the sorted set iterator: + +* `RedisModule_ZsetRangeStop` +* `RedisModule_ZsetFirstInScoreRange` +* `RedisModule_ZsetLastInScoreRange` +* `RedisModule_ZsetFirstInLexRange` +* `RedisModule_ZsetLastInLexRange` +* `RedisModule_ZsetRangeCurrentElement` +* `RedisModule_ZsetRangeNext` +* `RedisModule_ZsetRangePrev` +* `RedisModule_ZsetRangeEndReached` + +## Hash type API + +Documentation missing, please refer to the top comments inside `module.c` +for the following functions: + +* `RedisModule_HashSet` +* `RedisModule_HashGet` + +## Iterating aggregated values + +Work in progress. + +# Replicating commands + +If you want to use module commands exactly like normal Redis commands, in the +context of replicated Redis instances, or using the AOF file for persistence, +it is important for module commands to handle their replication in a consistent +way. + +When using the higher level APIs to invoke commands, replication happens +automatically if you use the "!" 
modifier in the format string of +`RedisModule_Call()` as in the following example: + + reply = RedisModule_Call(ctx,"INCR","!sc",argv[1],"10"); + +As you can see the format specifier is `"!sc"`. The bang is not parsed as a +format specifier, but it internally flags the command as "must replicate". + +If you use the above programming style, there are no problems. +However sometimes things are more complex than that, and you use the low level +API. In this case, if there are no side effects in the command execution, and +it consistently always performs the same work, what is possible to do is to +replicate the command verbatim as the user executed it. To do that, you just +need to call the following function: + + RedisModule_ReplicateVerbatim(ctx); + +When you use the above API, you should not use any other replication function +since they are not guaranteed to mix well. + +However this is not the only option. It's also possible to exactly tell +Redis what commands to replicate as the effect of the command execution, using +an API similar to `RedisModule_Call()` but that instead of calling the command +sends it to the AOF / slaves stream. Example: + + RedisModule_Replicate(ctx,"INCRBY","cl","foo",my_increment); + +It's possible to call `RedisModule_Replicate` multiple times, and each +will emit a command. All the sequence emitted is wrapped between a +`MULTI/EXEC` transaction, so that the AOF and replication effects are the +same as executing a single command. + +Note that `Call()` replication and `Replicate()` replication have a rule, +in case you want to mix both forms of replication (not necessarily a good +idea if there are simpler approaches). Commands replicated with `Call()` +are always the first emitted in the final `MULTI/EXEC` block, while all +the commands emitted with `Replicate()` will follow. + +# Automatic memory management + +Normally when writing programs in the C language, programmers need to manage +memory manually. 
This is why the Redis modules API has functions to release +strings, close open keys, free replies, and so forth. + +However given that commands are executed in a contained environment and +with a set of strict APIs, Redis is able to provide automatic memory management +to modules, at the cost of some performance (most of the time, a very low +cost). + +When automatic memory management is enabled: + +1. You don't need to close open keys. +2. You don't need to free replies. +3. You don't need to free RedisModuleString objects. + +However you can still do it, if you want. For example, automatic memory +management may be active, but inside a loop allocating a lot of strings, +you may still want to free strings no longer used. + +In order to enable automatic memory management, just call the following +function at the start of the command implementation: + + RedisModule_AutoMemory(ctx); + +Automatic memory management is usually the way to go, however experienced +C programmers may not use it in order to gain some speed and memory usage +benefit. + +# Allocating memory into modules + +Normal C programs use `malloc()` and `free()` in order to allocate and +release memory dynamically. While in Redis modules the use of malloc is +not technically forbidden, it is a lot better to use the Redis Modules +specific functions, that are exact replacements for `malloc`, `free`, +`realloc` and `strdup`. These functions are: + + void *RedisModule_Alloc(size_t bytes); + void* RedisModule_Realloc(void *ptr, size_t bytes); + void RedisModule_Free(void *ptr); + void RedisModule_Calloc(size_t nmemb, size_t size); + char *RedisModule_Strdup(const char *str); + +They work exactly like their `libc` equivalent calls, however they use +the same allocator Redis uses, and the memory allocated using these +functions is reported by the `INFO` command in the memory section, is +accounted when enforcing the `maxmemory` policy, and in general is +a first citizen of the Redis executable. 
On the contrar, the method +allocated inside modules with libc `malloc()` is transparent to Redis. + +Another reason to use the modules functions in order to allocate memory +is that, when creating native data types inside modules, the RDB loading +functions can return deserialized strings (from the RDB file) directly +as `RedisModule_Alloc()` allocations, so they can be used directly to +populate data structures after loading, instead of having to copy them +to the data structure. + +## Pool allocator + +Sometimes in commands implementations, it is required to perform many +small allocations that will be not retained at the end of the command +execution, but are just functional to execute the command itself. + +This work can be more easily accomplished using the Redis pool allocator: + + void *RedisModule_PoolAlloc(RedisModuleCtx *ctx, size_t bytes); + +It works similarly to `malloc()`, and returns memory aligned to the +next power of two of greater or equal to `bytes` (for a maximum alignment +of 8 bytes). However it allocates memory in blocks, so it the overhead +of the allocations is small, and more important, the memory allocated +is automatically released when the command returns. + +So in general short living allocations are a good candidates for the pool +allocator. + +# Writing commands compatible with Redis Cluster + +Documentation missing, please check the following functions inside `module.c`: + + RedisModule_IsKeysPositionRequest(ctx); + RedisModule_KeyAtPos(ctx,pos); diff --git a/topics/modules-native-types.md b/topics/modules-native-types.md new file mode 100644 index 0000000000..4d497356a2 --- /dev/null +++ b/topics/modules-native-types.md @@ -0,0 +1,379 @@ +Native types in Redis modules +=== + +Redis modules can access Redis built-in data structures both at high level, +by calling Redis commands, and at low level, by manipulating the data structures +directly. 
+ +By using these capabilities in order to build new abstractions on top of existing +Redis data structures, or by using strings DMA in order to encode modules +data structures into Redis strings, it is possible to create modules that +*feel like* they are exporting new data types. However, for more complex +problems, this is not enough, and the implementation of new data structures +inside the module is needed. + +We call the ability of Redis modules to implement new data structures that +feel like native Redis ones **native types support**. This document describes +the API exported by the Redis modules system in order to create new data +structures and handle the serialization in RDB files, the rewriting process +in AOF, the type reporting via the `TYPE` command, and so forth. + +Overview of native types +--- + +A module exporting a native type is composed of the following main parts: + +* The implementation of some kind of new data structure and of commands operating on the new data structure. +* A set of callbacks that handle: RDB saving, RDB loading, AOF rewriting, releasing of a value associated with a key, calculation of a value digest (hash) to be used with the `DEBUG DIGEST` command. +* A 9 characters name that is unique to each module native data type. +* An encoding version, used to persist into RDB files a module-specific data version, so that a module will be able to load older representations from RDB files. + +While to handle RDB loading, saving and AOF rewriting may look complex as a first glance, the modules API provide very high level function for handling all this, without requiring the user to handle read/write errors, so in practical terms, writing a new data structure for Redis is a simple task. + +A **very easy** to understand but complete example of native type implementation +is available inside the Redis distribution in the `/modules/hellotype.c` file. 
+The reader is encouraged to read the documentation by looking at this example +implementation to see how things are applied in the practice. + +Registering a new data type +=== + +In order to register a new native type into the Redis core, the module needs +to declare a global variable that will hold a reference to the data type. +The API to register the data type will return a data type reference that will +be stored in the global variable. + + static RedisModuleType *MyType; + #define MYTYPE_ENCODING_VERSION 0 + + int RedisModule_OnLoad(RedisModuleCtx *ctx) { + RedisModuleTypeMethods tm = { + .version = REDISMODULE_TYPE_METHOD_VERSION, + .rdb_load = MyTypeRDBLoad, + .rdb_save = MyTypeRDBSave, + .aof_rewrite = MyTypeAOFRewrite, + .free = MyTypeFree + }; + + MyType = RedisModule_CreateDataType(ctx, "MyType-AZ", + MYTYPE_ENCODING_VERSION, &tm); + if (MyType == NULL) return REDISMODULE_ERR; + } + +As you can see from the example above, a single API call is needed in order to +register the new type. However a number of function pointers are passed as +arguments. Certain are optionals while some are mandatory. The above set +of methods *must* be passed, while `.digest` and `.mem_usage` are optional +and are currently not actually supported by the modules internals, so for +now you can just ignore them. + +The `ctx` argument is the context that we receive in the `OnLoad` function. +The type `name` is a 9 character name in the character set that includes +from `A-Z`, `a-z`, `0-9`, plus the underscore `_` and minus `-` characters. + +Note that **this name must be unique** for each data type in the Redis +ecosystem, so be creative, use both lower-case and upper case if it makes +sense, and try to use the convention of mixing the type name with the name +of the author of the module, to create a 9 character unique name. + +**NOTE:** It is very important that the name is exactly 9 chars or the +registration of the type will fail. Read more to understand why. 
+ +For example if I'm building a *b-tree* data structure and my name is *antirez* +I'll call my type **btree1-az**. The name, converted to a 64 bit integer, +is stored inside the RDB file when saving the type, and will be used when the +RDB data is loaded in order to resolve what module can load the data. If Redis +finds no matching module, the integer is converted back to a name in order to +provide some clue to the user about what module is missing in order to load +the data. + +The type name is also used as a reply for the `TYPE` command when called +with a key holding the registered type. + +The `encver` argument is the encoding version used by the module to store data +inside the RDB file. For example I can start with an encoding version of 0, +but later when I release version 2.0 of my module, I can switch encoding to +something better. The new module will register with an encoding version of 1, +so when it saves new RDB files, the new version will be stored on disk. However +when loading RDB files, the module `rdb_load` method will be called even if +there is data found for a different encoding version (and the encoding version +is passed as argument to `rdb_load`), so that the module can still load old +RDB files. 
+ +The last argument is a structure used in order to pass the type methods to the +registration function: `rdb_load`, `rdb_save`, `aof_rewrite`, `digest` and +`free` and `mem_usage` are all callbacks with the following prototypes and uses: + + typedef void *(*RedisModuleTypeLoadFunc)(RedisModuleIO *rdb, int encver); + typedef void (*RedisModuleTypeSaveFunc)(RedisModuleIO *rdb, void *value); + typedef void (*RedisModuleTypeRewriteFunc)(RedisModuleIO *aof, RedisModuleString *key, void *value); + typedef size_t (*RedisModuleTypeMemUsageFunc)(void *value); + typedef void (*RedisModuleTypeDigestFunc)(RedisModuleDigest *digest, void *value); + typedef void (*RedisModuleTypeFreeFunc)(void *value); + +* `rdb_load` is called when loading data from the RDB file. It loads data in the same format as `rdb_save` produces. +* `rdb_save` is called when saving data to the RDB file. +* `aof_rewrite` is called when the AOF is being rewritten, and the module needs to tell Redis what is the sequence of commands to recreate the content of a given key. +* `digest` is called when `DEBUG DIGEST` is executed and a key holding this module type is found. Currently this is not yet implemented so the function ca be left empty. +* `mem_usage` is called when the `MEMORY` command asks for the total memory consumed by a specific key, and is used in order to get the amount of bytes used by the module value. +* `free` is called when a key with the module native type is deleted via `DEL` or in any other mean, in order to let the module reclaim the memory associated with such a value. + +Ok, but *why* modules types require a 9 characters name? +--- + +Oh, I understand you need to understand this, so here is a very specific +explanation. + +When Redis persists to RDB files, modules specific data types require to +be persisted as well. 
Now RDB files are sequences of key-value pairs +like the following: + + [1 byte type] [key] [a type specific value] + +The 1 byte type identifies strings, lists, sets, and so forth. In the case +of modules data, it is set to a special value of `module data`, but of +course this is not enough, we need the information needed to link a specific +value with a specific module type that is able to load and handle it. + +So when we save a `type specific value` about a module, we prefix it with +a 64 bit integer. 64 bits is large enough to store the informations needed +in order to lookup the module that can handle that specific type, but is +short enough that we can prefix each module value we store inside the RDB +without making the final RDB file too big. At the same time, this solution +of prefixing the value with a 64 bit *signature* does not require to do +strange things like defining in the RDB header a list of modules specific +types. Everything is pretty simple. + +So, what you can store in 64 bits in order to identify a given module in +a reliable way? Well if you build a character set of 64 symbols, you can +easily store 9 characters of 6 bits, and you are left with 10 bits, that +are used in order to store the *encoding version* of the type, so that +the same type can evolve in the future and provide a different and more +efficient or updated serialization format for RDB files. + +So the 64 bit prefix stored before each module value is like the following: + + 6|6|6|6|6|6|6|6|6|10 + +The first 9 elements are 6-bits characters, the final 10 bits is the +encoding version. + +When the RDB file is loaded back, it reads the 64 bit value, masks the final +10 bits, and searches for a matching module in the modules types cache. +When a matching one is found, the method to load the RDB file value is called +with the 10 bits encoding version as argument, so that the module knows +what version of the data layout to load, if it can support multiple versions. 
+ +Now the interesting thing about all this is that, if instead the module type +cannot be resolved, since there is no loaded module having this signature, +we can convert back the 64 bit value into a 9 characters name, and print +an error to the user that includes the module type name! So that she or he +immediately realizes what's wrong. + +Setting and getting keys +--- + +After registering our new data type in the `RedisModule_OnLoad()` function, +we also need to be able to set Redis keys having as value our native type. + +This normally happens in the context of commands that write data to a key. +The native types API allow to set and get keys to module native data types, +and to test if a given key is already associated to a value of a specific data +type. + +The API uses the normal modules `RedisModule_OpenKey()` low level key access +interface in order to deal with this. This is an eaxmple of setting a +native type private data structure to a Redis key: + + RedisModuleKey *key = RedisModule_OpenKey(ctx,keyname,REDISMODULE_WRITE); + struct some_private_struct *data = createMyDataStructure(); + RedisModule_ModuleTypeSetValue(key,MyType,data); + +The function `RedisModule_ModuleTypeSetValue()` is used with a key handle open +for writing, and gets three arguments: the key handle, the reference to the +native type, as obtained during the type registration, and finally a `void*` +pointer that contains the private data implementing the module native type. + +Note that Redis has no clues at all about what your data contains. It will +just call the callbacks you provided during the method registration in order +to perform operations on the type. + +Similarly we can retrieve the private data from a key using this function: + + struct some_private_struct *data; + data = RedisModule_ModuleTypeGetValue(key); + +We can also test for a key to have our native type as value: + + if (RedisModule_ModuleTypeGetType(key) == MyType) { + /* ... do something ... 
*/ + } + +However for the calls to do the right thing, we need to check if the key +is empty, if it contains a value of the right kind, and so forth. So +the idiomatic code to implement a command writing to our native type +is along these lines: + + RedisModuleKey *key = RedisModule_OpenKey(ctx,argv[1], + REDISMODULE_READ|REDISMODULE_WRITE); + int type = RedisModule_KeyType(key); + if (type != REDISMODULE_KEYTYPE_EMPTY && + RedisModule_ModuleTypeGetType(key) != MyType) + { + return RedisModule_ReplyWithError(ctx,REDISMODULE_ERRORMSG_WRONGTYPE); + } + +Then if we successfully verified the key is not of the wrong type, and +we are going to write to it, we usually want to create a new data structure if +the key is empty, or retrieve the reference to the value associated to the +key if there is already one: + + /* Create an empty value object if the key is currently empty. */ + struct some_private_struct *data; + if (type == REDISMODULE_KEYTYPE_EMPTY) { + data = createMyDataStructure(); + RedisModule_ModuleTypeSetValue(key,MyType,data); + } else { + data = RedisModule_ModuleTypeGetValue(key); + } + /* Do something with 'data'... */ + +Free method +--- + +As already mentioned, when Redis needs to free a key holding a native type +value, it needs help from the module in order to release the memory. This +is the reason why we pass a `free` callback during the type registration: + + typedef void (*RedisModuleTypeFreeFunc)(void *value); + +A trivial implementation of the free method can be something like this, +assuming our data structure is composed of a single allocation: + + void MyTypeFreeCallback(void *value) { + RedisModule_Free(value); + } + +However a more real world one will call some function that performs a more +complex memory reclaiming, by casting the void pointer to some structure +and freeing all the resources composing the value. 
+ +RDB load and save methods +--- + +The RDB saving and loading callbacks need to create (and load back) a +representation of the data type on disk. Redis offers an high level API +that can automatically store inside the RDB file the following types: + +* Unsigned 64 bit integers. +* Signed 64 bit integers. +* Doubles. +* Strings. + +It is up to the module to find a viable representation using the above base +types. However note that while the integer and double values are stored +and loaded in an architecture and *endianess* agnostic way, if you use +the raw string saving API to, for example, save a structure on disk, you +have to care those details yourself. + +This is the list of functions performing RDB saving and loading: + + void RedisModule_SaveUnsigned(RedisModuleIO *io, uint64_t value); + uint64_t RedisModule_LoadUnsigned(RedisModuleIO *io); + void RedisModule_SaveSigned(RedisModuleIO *io, int64_t value); + int64_t RedisModule_LoadSigned(RedisModuleIO *io); + void RedisModule_SaveString(RedisModuleIO *io, RedisModuleString *s); + void RedisModule_SaveStringBuffer(RedisModuleIO *io, const char *str, size_t len); + RedisModuleString *RedisModule_LoadString(RedisModuleIO *io); + char *RedisModule_LoadStringBuffer(RedisModuleIO *io, size_t *lenptr); + void RedisModule_SaveDouble(RedisModuleIO *io, double value); + double RedisModule_LoadDouble(RedisModuleIO *io); + +The functions don't require any error checking from the module, that can +always assume calls succeed. 
+ +As an example, imagine I've a native type that implements an array of +double values, with the following structure: + + struct double_array { + size_t count; + double *values; + }; + +My `rdb_save` method may look like the following: + + void DoubleArrayRDBSave(RedisModuleIO *io, void *ptr) { + struct dobule_array *da = ptr; + RedisModule_SaveUnsigned(io,da->count); + for (size_t j = 0; j < da->count; j++) + RedisModule_SaveDouble(io,da->values[j]); + } + +What we did was to store the number of elements followed by each double +value. So when later we'll have to load the structure in the `rdb_load` +method we'll do something like this: + + void *DoubleArrayRDBLoad(RedisModuleIO *io, int encver) { + if (encver != DOUBLE_ARRAY_ENC_VER) { + /* We should actually log an error here, or try to implement + the ability to load older versions of our data structure. */ + return NULL; + } + + struct double_array *da; + da = RedisModule_Alloc(sizeof(*da)); + da->count = RedisModule_LoadUnsigned(io); + da->values = RedisModule_Alloc(da->count * sizeof(double)); + for (size_t j = 0; j < da->count; j++) + da->values = RedisModule_LoadDouble(io); + return da; + } + +The load callback just reconstruct back the data structure from the data +we stored in the RDB file. + +Note that while there is no error handling on the API that writes and reads +from disk, still the load callback can return NULL on errors in case what +it reads does not look correct. Redis will just panic in that case. + +AOF rewriting +--- + + void RedisModule_EmitAOF(RedisModuleIO *io, const char *cmdname, const char *fmt, ...); + +Handling multiple encodings +--- + + WORK IN PROGRESS + +Allocating memory +--- + +Modules data types should try to use `RedisModule_Alloc()` functions family +in order to allocate, reallocate and release heap memory used to implement the native data structures (see the other Redis Modules documentation for detailed information). 
+ +This is not just useful in order for Redis to be able to account for the memory used by the module, but there are also more advantages: + +* Redis uses the `jemalloc` allcator, that often prevents fragmentation problems that could be caused by using the libc allocator. +* When loading strings from the RDB file, the native types API is able to return strings allocated directly with `RedisModule_Alloc()`, so that the module can directly link this memory into the data structure representation, avoiding an useless copy of the data. + +Even if you are using external libraries implementing your data structures, the +allocation functions provided by the module API is exactly compatible with +`malloc()`, `realloc()`, `free()` and `strdup()`, so converting the libraries +in order to use these functions should be trivial. + +In case you have an external library that uses libc `malloc()`, and you want +to avoid replacing manually all the calls with the Redis Modules API calls, +an approach could be to use simple macros in order to replace the libc calls +with the Redis API calls. Something like this could work: + + #define malloc RedisModule_Alloc + #define realloc RedisModule_Realloc + #define free RedisModule_Free + #define strdup RedisModule_Strdup + +However take in mind that mixing libc calls with Redis API calls will result +into troubles and crashes, so if you replace calls using macros, you need to +make sure that all the calls are correctly replaced, and that the code with +the substituted calls will never, for example, attempt to call +`RedisModule_Free()` with a pointer allocated using libc `malloc()`. From c50a291a20700f0192c9fe63ab16ac4ef1579b17 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 14 Jul 2017 12:12:24 +0200 Subject: [PATCH 0780/2314] Document that modules blocking APIs are experimental. 
--- topics/modules-blocking-ops.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/topics/modules-blocking-ops.md b/topics/modules-blocking-ops.md index d4f3c93bc8..87409de7f5 100644 --- a/topics/modules-blocking-ops.md +++ b/topics/modules-blocking-ops.md @@ -16,6 +16,16 @@ Redis modules have the ability to implement blocking commands as well, this documentation shows how the API works and describes a few patterns that can be used in order to model blocking commands. +NOTE: This API is currently *experimental*, so it can only be used if +the macro `REDISMODULE_EXPERIMENTAL_API` is defined. This is required because +these calls are still not in their final stage of design, so may change +in the future, certain parts may be deprecated and so forth. + +To use this part of the modules API include the modules header like this: + + #define REDISMODULE_EXPERIMENTAL_API + #include "redismodule.h" + How blocking and resuming works. --- From d59b9787759fb579351a457d89e3e1d513f3d069 Mon Sep 17 00:00:00 2001 From: Junya Hayashi Date: Wed, 19 Jul 2017 06:01:07 +0900 Subject: [PATCH 0781/2314] Add gxredis for language Python (#786) --- clients.json | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index 680485e5e2..399eef5ac8 100644 --- a/clients.json +++ b/clients.json @@ -467,6 +467,15 @@ "active": true }, + { + "name": "gxredis", + "language": "Python", + "repository": "https://github.com/groove-x/gxredis", + "description": "Simple redis-py wrapper library", + "authors": ["loose_agilist"], + "active": true + }, + { "name": "txredis", "language": "Python", @@ -823,7 +832,7 @@ "authors": ["hamidr_"], "active": true }, - + { "name": "acl-redis", "language": "C++", @@ -1499,7 +1508,7 @@ "authors": ["etehtsea"], "active": true }, - + { "name": "RcppRedis", "language": "R", @@ -1509,7 +1518,7 @@ "authors": ["eddelbuettel"], "active": true }, - + { "name": "Redux", "language": "R", From 
ce6a7a6d786959479dc3f33959ebe001f961a806 Mon Sep 17 00:00:00 2001 From: John Clover Date: Mon, 24 Jul 2017 12:52:17 -0700 Subject: [PATCH 0782/2314] Fixes minor typo (#848) - you -> your --- topics/notifications.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/notifications.md b/topics/notifications.md index 6f0f72ee89..07f684669d 100644 --- a/topics/notifications.md +++ b/topics/notifications.md @@ -19,7 +19,7 @@ Events are delivered using the normal Pub/Sub layer of Redis, so clients implementing Pub/Sub are able to use this feature without modifications. Because Redis Pub/Sub is *fire and forget* currently there is no way to use this -feature if you application demands **reliable notification** of events, that is, +feature if your application demands **reliable notification** of events, that is, if your Pub/Sub client disconnects, and reconnects later, all the events delivered during the time the client was disconnected are lost. From 6e58ab2ca978e78b62d0d7c4370a13700a315d15 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 30 Jul 2017 00:43:05 +0300 Subject: [PATCH 0783/2314] Initial docs for the `MEMORY` command --- commands.json | 44 +++++++++++++++++++++++++++++++++ commands/memory-doctor.md | 6 +++++ commands/memory-help.md | 6 +++++ commands/memory-malloc-stats.md | 9 +++++++ commands/memory-purge.md | 9 +++++++ commands/memory-stats.md | 43 ++++++++++++++++++++++++++++++++ commands/memory-usage.md | 40 ++++++++++++++++++++++++++++++ 7 files changed, 157 insertions(+) create mode 100644 commands/memory-doctor.md create mode 100644 commands/memory-help.md create mode 100644 commands/memory-malloc-stats.md create mode 100644 commands/memory-purge.md create mode 100644 commands/memory-stats.md create mode 100644 commands/memory-usage.md diff --git a/commands.json b/commands.json index 06a9b10719..2124d0453c 100644 --- a/commands.json +++ b/commands.json @@ -1534,6 +1534,50 @@ "since": "1.0.0", "group": "list" }, + "MEMORY 
DOCTOR": { + "summary": "Outputs memory problems report", + "since": "4.0.0", + "group": "server" + }, + "MEMORY HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "4.0.0", + "group": "server" + + }, + "MEMORY MALLOC-STATS": { + "summary": "Show allocator internal stats", + "since": "4.0.0", + "group": "server" + }, + "MEMORY PURGE": { + "summary": "Ask the allocator to release memory", + "since": "4.0.0", + "group": "server" + }, + "MEMORY STATS": { + "summary": "Show memory usage details", + "since": "4.0.0", + "group": "server" + }, + "MEMORY USAGE": { + "summary": "Estimate the memory usage of a key", + "complexity": "O(N) where N is the number of samples.", + "arguments": [ + { + "name": "key", + "type": "key" + }, + { + "command": "SAMPLES", + "name": "count", + "type": "integer", + "optional": true + } + ], + "since": "4.0.0", + "group": "server" + }, "MGET": { "summary": "Get the values of all the given keys", "complexity": "O(N) where N is the number of keys to retrieve.", diff --git a/commands/memory-doctor.md b/commands/memory-doctor.md new file mode 100644 index 0000000000..dbb9db3e33 --- /dev/null +++ b/commands/memory-doctor.md @@ -0,0 +1,6 @@ +The `MEMORY DOCTOR` command reports about different memory-related issues that +the Redis server experiences, and advises about possible remedies. + +@return + +@bulk-string-reply \ No newline at end of file diff --git a/commands/memory-help.md b/commands/memory-help.md new file mode 100644 index 0000000000..c0f4086f53 --- /dev/null +++ b/commands/memory-help.md @@ -0,0 +1,6 @@ +The `MEMORY HELP` command returns a helpful text describing the different +subcommands. 
+ +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/commands/memory-malloc-stats.md b/commands/memory-malloc-stats.md new file mode 100644 index 0000000000..8da8e72e96 --- /dev/null +++ b/commands/memory-malloc-stats.md @@ -0,0 +1,9 @@ +The `MEMORY MALLOC-STATS` command provides an internal statistics report from +the memory allocator. + +This command is currently implemented only when using **jemalloc** as an +allocator, and evaluates to a benign NOOP for all others. + +@return + +@bulk-string-reply: the memory allocator's internal statistics report diff --git a/commands/memory-purge.md b/commands/memory-purge.md new file mode 100644 index 0000000000..5ebe43356d --- /dev/null +++ b/commands/memory-purge.md @@ -0,0 +1,9 @@ +The `MEMORY PURGE` command attempts to purge dirty pages so these can be +reclaimed by the allocator. + +This command is currently implemented only when using **jemalloc** as an +allocator, and evaluates to a benign NOOP for all others. + +@return + +@simple-string-reply diff --git a/commands/memory-stats.md b/commands/memory-stats.md new file mode 100644 index 0000000000..ddc41ac366 --- /dev/null +++ b/commands/memory-stats.md @@ -0,0 +1,43 @@ +The `MEMORY STATS` command returns an @array-reply about the memory usage of the +server. + +The information about memory usage is provided as metrics and their respective +values. 
The following metrics are reported: + +* `peak.allocated`: Peak memory consumed by Redis in bytes (see `INFO`'s + `used_memory`) +* `total.allocated`: Total number of bytes allocated by Redis using its + allocator (see `INFO`'s `used_memory`) +* `startup.allocated`: Initial amount of memory consumed by Redis at startup + in bytes +* `replication.backlog`: Size in bytes of the replication backlog (see + `INFO`'s `repl_backlog_size`) +* `clients.slaves`: The total size in bytes of all slaves overheads (output + and query buffers, connection contexts) +* `clients.normal`: The total size in bytes of all clients overheads (output + and query buffers, connection contexts) +* `aof.buffer`: The summed size in bytes of the current and rewrite AOF + buffers (see `INFO`'s `aof_buffer_length` and `aof_rewrite_buffer_length`, + respectively) +* `dbXXX`: For each of the server's databases, the overheads of the main and + expiry dictionaries (`overhead.hashtable.main` and + `overhead.hashtable.expires`, respectively) are reported in bytes +* `overhead.total`: The sum of all overheads, i.e. 
`startup.allocated`, + `replication.backlog`, `clients.slaves`, `clients.normal`, `aof.buffer` and + those of the internal data structures that are used in managing the + Redis keyspace +* `keys.count`: The total number of keys stored across all databases in the + server +* `keys.bytes-per-key`: The ratio between **net memory usage** (`total.allocated` + minus `startup.allocated`) and `keys.count` +* `dataset.bytes`: The size in bytes of the dataset (`overhead.total` + subtracted from `total.allocated`) +* `dataset.percentage`: The percentage of `dataset.bytes` out of the net + memory usage +* `peak.percentage`: The percentage of `peak.allocated` out of + `total.allocated` +* `fragmentation`: See `INFO`'s `mem_fragmentation_ratio` + +@return + +@array-reply: nested list of memory usage metrics and their values diff --git a/commands/memory-usage.md b/commands/memory-usage.md new file mode 100644 index 0000000000..2bdb1d9dee --- /dev/null +++ b/commands/memory-usage.md @@ -0,0 +1,40 @@ +The `MEMORY USAGE` command reports the number of bytes that a key and its value +require to be stored in RAM. + +The reported usage is the total of memory allocations for data and +administrative overheads that a key and its value require. + +For nested data types, the optional `SAMPLES` option can be provided, where +`count` is the number of sampled nested values. By default, this option is set +to `5`. To sample all of the nested values, use `SAMPLES 0`. + +@examples + +With Redis v4.0.1 64-bit and **jemalloc**, the empty string measures as follows: + +``` +> SET "" "" +OK +> MEMORY USAGE "" +(integer) 51 +``` + +These bytes are pure overhead at the moment as no actual data is stored, and are +used for maintaining the internal data structures of the server. Longer keys and +values show asymptotically linear usage. 
+ +``` +> SET foo bar +OK +> MEMORY USAGE foo +(integer) 54 +> SET cento 01234567890123456789012345678901234567890123 +45678901234567890123456789012345678901234567890123456789 +OK +127.0.0.1:6379> MEMORY USAGE cento +(integer) 153 +``` + +@return + +@integer-reply: the memory usage in bytes \ No newline at end of file From 492ebac259b7095556a10279f9125678a854e3aa Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 30 Jul 2017 00:46:05 +0300 Subject: [PATCH 0784/2314] WIP on `INFO` doc This is quite a rabbit hole - missing pieces marked w/ "TBD" --- commands/info.md | 144 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 110 insertions(+), 34 deletions(-) diff --git a/commands/info.md b/commands/info.md index d30565ac9c..766b0f1711 100644 --- a/commands/info.md +++ b/commands/info.md @@ -47,44 +47,73 @@ Here is the meaning of all fields in the **server** section: * `redis_version`: Version of the Redis server * `redis_git_sha1`: Git SHA1 * `redis_git_dirty`: Git dirty flag +* `redis_build_id`: The build id +* `redis_mode`: The server's mode ("standalone", "sentinel" or "cluster") * `os`: Operating system hosting the Redis server * `arch_bits`: Architecture (32 or 64 bits) -* `multiplexing_api`: event loop mechanism used by Redis +* `multiplexing_api`: Event loop mechanism used by Redis +* `atomicvar_api`: Atomicvar API used by Redis * `gcc_version`: Version of the GCC compiler used to compile the Redis server * `process_id`: PID of the server process -* `run_id`: Random value identifying the Redis server (to be used by Sentinel and Cluster) +* `run_id`: Random value identifying the Redis server (to be used by Sentinel + and Cluster) * `tcp_port`: TCP/IP listen port * `uptime_in_seconds`: Number of seconds since Redis server start * `uptime_in_days`: Same value expressed in days +* `hz`: The server's frequency setting * `lru_clock`: Clock incrementing every minute, for LRU management +* `executable`: The path to the server's executable +* 
`config_file`: The path to the config file Here is the meaning of all fields in the **clients** section: -* `connected_clients`: Number of client connections (excluding connections from slaves) -* `client_longest_output_list`: longest output list among current client connections -* `client_biggest_input_buf`: biggest input buffer among current client connections -* `blocked_clients`: Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH) +* `connected_clients`: Number of client connections (excluding connections + from slaves) +* `client_longest_output_list`: longest output list among current client + connections +* `client_biggest_input_buf`: biggest input buffer among current client + connections +* `blocked_clients`: Number of clients pending on a blocking call (BLPOP, + BRPOP, BRPOPLPUSH) Here is the meaning of all fields in the **memory** section: -* `used_memory`: total number of bytes allocated by Redis using its - allocator (either standard **libc**, **jemalloc**, or an alternative allocator such - as [**tcmalloc**][hcgcpgp] +* `used_memory`: Total number of bytes allocated by Redis using its + allocator (either standard **libc**, **jemalloc**, or an alternative + allocator such as [**tcmalloc**][hcgcpgp]) * `used_memory_human`: Human readable representation of previous value * `used_memory_rss`: Number of bytes that Redis allocated as seen by the - operating system (a.k.a resident set size). This is the number reported by tools - such as `top(1)` and `ps(1)` + operating system (a.k.a resident set size). 
This is the number reported by + tools such as `top(1)` and `ps(1)` +* `used_memory_rss_human`: Human readable representation of previous value * `used_memory_peak`: Peak memory consumed by Redis (in bytes) * `used_memory_peak_human`: Human readable representation of previous value +* `used_memory_peak_perc`: TBD +* `used_memory_overhead`: TBD +* `used_memory_startup`: TBD +* `used_memory_dataset`: TBD +* `used_memory_dataset_perc`: TBD +* `total_system_memory`: TBD +* `total_system_memory_human`: Human readable representation of previous value * `used_memory_lua`: Number of bytes used by the Lua engine +* `used_memory_lua_human`: Human readable representation of previous value +* `used_memory_lua:`: TBD +* `used_memory_lua_human`: Human readable representation of previous value +* `maxmemory`: TBD +* `maxmemory_human`: Human readable representation of previous value +* `maxmemory_policy`: TBD * `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory` * `mem_allocator`: Memory allocator, chosen at compile time +* `active_defrag_running`: TBD +* `lazyfree_pending_objects`: TBD -Ideally, the `used_memory_rss` value should be only slightly higher than `used_memory`. +Ideally, the `used_memory_rss` value should be only slightly higher than +`used_memory`. When rss >> used, a large difference means there is memory fragmentation -(internal or external), which can be evaluated by checking `mem_fragmentation_ratio`. -When used >> rss, it means part of Redis memory has been swapped off by the operating -system: expect some significant latencies. +(internal or external), which can be evaluated by checking +`mem_fragmentation_ratio`. +When used >> rss, it means part of Redis memory has been swapped off by the +operating system: expect some significant latencies. 
Because Redis does not have control over how its allocations are mapped to memory pages, high `used_memory_rss` is often the result of a spike in memory @@ -94,8 +123,8 @@ When Redis frees memory, the memory is given back to the allocator, and the allocator may or may not give the memory back to the system. There may be a discrepancy between the `used_memory` value and memory consumption as reported by the operating system. It may be due to the fact memory has been -used and released by Redis, but not given back to the system. The `used_memory_peak` -value is generally useful to check this point. +used and released by Redis, but not given back to the system. The +`used_memory_peak` value is generally useful to check this point. Here is the meaning of all fields in the **persistence** section: @@ -104,15 +133,23 @@ Here is the meaning of all fields in the **persistence** section: * `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going * `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save * `rdb_last_bgsave_status`: Status of the last RDB save operation -* `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in seconds -* `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if any +* `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in + seconds +* `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation + if any +* `rdb_last_cow_size`: TBD * `aof_enabled`: Flag indicating AOF logging is activated -* `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is on-going +* `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is + on-going * `aof_rewrite_scheduled`: Flag indicating an AOF rewrite operation will be scheduled once the on-going RDB save is complete. 
-* `aof_last_rewrite_time_sec`: Duration of the last AOF rewrite operation in seconds -* `aof_current_rewrite_time_sec`: Duration of the on-going AOF rewrite operation if any +* `aof_last_rewrite_time_sec`: Duration of the last AOF rewrite operation in + seconds +* `aof_current_rewrite_time_sec`: Duration of the on-going AOF rewrite + operation if any * `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation +* `aof_last_write_status`: TBD +* `aof_last_cow_size`: TBD `changes_since_last_save` refers to the number of operations that produced some kind of changes in the dataset since the last time either `SAVE` or @@ -126,12 +163,14 @@ If AOF is activated, these additional fields will be added: will be scheduled once the on-going RDB save is complete. * `aof_buffer_length`: Size of the AOF buffer * `aof_rewrite_buffer_length`: Size of the AOF rewrite buffer -* `aof_pending_bio_fsync`: Number of fsync pending jobs in background I/O queue +* `aof_pending_bio_fsync`: Number of fsync pending jobs in background I/O + queue * `aof_delayed_fsync`: Delayed fsync counter If a load operation is on-going, these additional fields will be added: -* `loading_start_time`: Epoch-based timestamp of the start of the load operation +* `loading_start_time`: Epoch-based timestamp of the start of the load + operation * `loading_total_bytes`: Total file size * `loading_loaded_bytes`: Number of bytes already loaded * `loading_loaded_perc`: Same value expressed as a percentage @@ -139,35 +178,66 @@ If a load operation is on-going, these additional fields will be added: Here is the meaning of all fields in the **stats** section: -* `total_connections_received`: Total number of connections accepted by the server +* `total_connections_received`: Total number of connections accepted by the + server * `total_commands_processed`: Total number of commands processed by the server * `instantaneous_ops_per_sec`: Number of commands processed per second -* `rejected_connections`: Number of 
connections rejected because of `maxclients` limit +* `total_net_input_bytes`: TBD +* `total_net_output_bytes`: TBD +* `instantaneous_input_kbps`: TBD +* `instantaneous_output_kbps`: TBD +* `rejected_connections`: Number of connections rejected because of + `maxclients` limit +* `sync_full`: TBD +* `sync_partial_ok`: TBD +* `sync_partial_err`: TBD * `expired_keys`: Total number of key expiration events * `evicted_keys`: Number of evicted keys due to `maxmemory` limit * `keyspace_hits`: Number of successful lookup of keys in the main dictionary * `keyspace_misses`: Number of failed lookup of keys in the main dictionary -* `pubsub_channels`: Global number of pub/sub channels with client subscriptions -* `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions +* `pubsub_channels`: Global number of pub/sub channels with client + subscriptions +* `pubsub_patterns`: Global number of pub/sub pattern with client + subscriptions * `latest_fork_usec`: Duration of the latest fork operation in microseconds +* `migrate_cached_sockets`: TBD +* `slave_expires_tracked_keys`: TBD +* `active_defrag_hits`: TBD +* `active_defrag_misses`: TBD +* `active_defrag_key_hits`: TBD +* `active_defrag_key_misses`: TBD Here is the meaning of all fields in the **replication** section: -* `role`: Value is "master" if the instance is slave of no one, or "slave" if the instance is enslaved to a master. - Note that a slave can be master of another slave (daisy chaining). +* `role`: Value is "master" if the instance is slave of no one, or "slave" if + the instance is enslaved to master. + Note that a slave can be master of another slave (daisy chaining). 
+* `master_replid`: TBD +* `master_replid2`: TBD +* `master_repl_offset`: TBD +* `second_repl_offset`: TBD +* `repl_backlog_active`: TBD +* `repl_backlog_size`: Size in bytes of the replication backlog +* `repl_backlog_first_byte_offset`: TBD +* `repl_backlog_histlen`: TBD If the instance is a slave, these additional fields are provided: * `master_host`: Host or IP address of the master * `master_port`: Master listening TCP port * `master_link_status`: Status of the link (up/down) -* `master_last_io_seconds_ago`: Number of seconds since the last interaction with master +* `master_last_io_seconds_ago`: Number of seconds since the last interaction + with master * `master_sync_in_progress`: Indicate the master is syncing to the slave +* `slave_repl_offset`: TBD +* `slave_priority`: TBD +* `slave_read_only`: TBD If a SYNC operation is on-going, these additional fields are provided: * `master_sync_left_bytes`: Number of bytes left before syncing is complete -* `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O during a SYNC operation +* `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O + during a SYNC operation If the link between master and slave is down, an additional field is provided: @@ -177,9 +247,14 @@ The following field is always provided: * `connected_slaves`: Number of connected slaves +If the server is configured with the `min-slaves-to-write` directive, an +additional field is provided: + +* `min_slaves_good_slaves`: Number of slaves currently considered good + For each slave, the following line is added: -* `slaveXXX`: id, IP address, port, state +* `slaveXXX`: id, IP address, port, state, offset, lag Here is the meaning of all fields in the **cpu** section: @@ -200,7 +275,8 @@ The **cluster** section currently only contains a unique field: * `cluster_enabled`: Indicate Redis cluster is enabled -The **keyspace** section provides statistics on the main dictionary of each database. 
+The **keyspace** section provides statistics on the main dictionary of each +database. The statistics are the number of keys, and the number of keys with an expiration. For each database, the following line is added: From 2b42102c3eba3a5ebacab1975261297284d6d50c Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 30 Jul 2017 22:55:20 +0300 Subject: [PATCH 0785/2314] Completes INFO gaps --- commands/info.md | 96 ++++++++++++++++++++++++---------------- commands/memory-stats.md | 8 ++-- 2 files changed, 61 insertions(+), 43 deletions(-) diff --git a/commands/info.md b/commands/info.md index 766b0f1711..fc125f4209 100644 --- a/commands/info.md +++ b/commands/info.md @@ -88,24 +88,30 @@ Here is the meaning of all fields in the **memory** section: * `used_memory_rss_human`: Human readable representation of previous value * `used_memory_peak`: Peak memory consumed by Redis (in bytes) * `used_memory_peak_human`: Human readable representation of previous value -* `used_memory_peak_perc`: TBD -* `used_memory_overhead`: TBD -* `used_memory_startup`: TBD -* `used_memory_dataset`: TBD -* `used_memory_dataset_perc`: TBD -* `total_system_memory`: TBD +* `used_memory_peak_perc`: The percentage of `used_memory_peak` out of + `used_memory` +* `used_memory_overhead`: The sum in bytes of all overheads that the server + allocated for managing its internal data structures +* `used_memory_startup`: Initial amount of memory consumed by Redis at startup + in bytes +* `used_memory_dataset`: The size in bytes of the dataset + (`used_memory_overhead` subtracted from `used_memory`) +* `used_memory_dataset_perc`: The percentage of `used_memory_dataset` out of + the net memory usage (`used_memory` minus `used_memory_startup`) +* `total_system_memory`: The total amount of memory that the Redis host has * `total_system_memory_human`: Human readable representation of previous value * `used_memory_lua`: Number of bytes used by the Lua engine * `used_memory_lua_human`: Human readable 
representation of previous value -* `used_memory_lua:`: TBD -* `used_memory_lua_human`: Human readable representation of previous value -* `maxmemory`: TBD +* `maxmemory`: The value of the `maxmemory` configuration directive * `maxmemory_human`: Human readable representation of previous value -* `maxmemory_policy`: TBD +* `maxmemory_policy`: The value of the `maxmemory-policy` configuration + directive * `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory` * `mem_allocator`: Memory allocator, chosen at compile time -* `active_defrag_running`: TBD -* `lazyfree_pending_objects`: TBD +* `active_defrag_running`: Flag indicating if active defragmentation is active +* `lazyfree_pending_objects`: The number of objects waiting to be freed (as a + result of calling `UNLINK`, or `FLUSHDB` and `FLUSHALL` with the **ASYNC** + option) Ideally, the `used_memory_rss` value should be only slightly higher than `used_memory`. @@ -126,6 +132,9 @@ reported by the operating system. It may be due to the fact memory has been used and released by Redis, but not given back to the system. The `used_memory_peak` value is generally useful to check this point. +Additional introspective information about the server's memory can be obtained +by referring to the `MEMORY STATS` command and the `MEMORY DOCTOR`. 
+ +Here is the meaning of all fields in the **persistence** section: * `loading`: Flag indicating if the load of a dump file is on-going @@ -137,7 +146,8 @@ Here is the meaning of all fields in the **persistence** section: seconds * `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if any -* `rdb_last_cow_size`: TBD +* `rdb_last_cow_size`: The size in bytes of copy-on-write allocations during + the last RDB save operation * `aof_enabled`: Flag indicating AOF logging is activated * `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is on-going @@ -148,8 +158,9 @@ Here is the meaning of all fields in the **persistence** section: * `aof_current_rewrite_time_sec`: Duration of the on-going AOF rewrite operation if any * `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation -* `aof_last_write_status`: TBD -* `aof_last_cow_size`: TBD +* `aof_last_write_status`: Status of the last write operation to the AOF +* `aof_last_cow_size`: The size in bytes of copy-on-write allocations during + the last AOF rewrite operation `changes_since_last_save` refers to the number of operations that produced some kind of changes in the dataset since the last time either `SAVE` or @@ -182,15 +193,15 @@ Here is the meaning of all fields in the **stats** section: server * `total_commands_processed`: Total number of commands processed by the server * `instantaneous_ops_per_sec`: Number of commands processed per second -* `total_net_input_bytes`: TBD -* `total_net_output_bytes`: TBD -* `instantaneous_input_kbps`: TBD -* `instantaneous_output_kbps`: TBD +* `total_net_input_bytes`: The total number of bytes read from the network +* `total_net_output_bytes`: The total number of bytes written to the network +* `instantaneous_input_kbps`: The network's read rate per second in KB/sec +* `instantaneous_output_kbps`: The network's write rate per second in KB/sec * `rejected_connections`: Number of connections rejected because of `maxclients` limit 
-* `sync_full`: TBD -* `sync_partial_ok`: TBD -* `sync_partial_err`: TBD +* `sync_full`: The number of full resyncs with slaves +* `sync_partial_ok`: The number of accepted partial resync requests +* `sync_partial_err`: The number of denied partial resync requests * `expired_keys`: Total number of key expiration events * `evicted_keys`: Number of evicted keys due to `maxmemory` limit * `keyspace_hits`: Number of successful lookup of keys in the main dictionary @@ -200,26 +211,33 @@ Here is the meaning of all fields in the **stats** section: * `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions * `latest_fork_usec`: Duration of the latest fork operation in microseconds -* `migrate_cached_sockets`: TBD -* `slave_expires_tracked_keys`: TBD -* `active_defrag_hits`: TBD -* `active_defrag_misses`: TBD -* `active_defrag_key_hits`: TBD -* `active_defrag_key_misses`: TBD +* `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes +* `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes + (applicable only to writable slaves) +* `active_defrag_hits`: Number of value reallocations performed by the active + defragmentation process +* `active_defrag_misses`: Number of aborted value reallocations started by the + active defragmentation process +* `active_defrag_key_hits`: Number of keys that were actively defragmented +* `active_defrag_key_misses`: Number of keys that were skipped by the active + defragmentation process Here is the meaning of all fields in the **replication** section: * `role`: Value is "master" if the instance is slave of no one, or "slave" if the instance is enslaved to master. Note that a slave can be master of another slave (daisy chaining). 
-* `master_replid`: TBD -* `master_replid2`: TBD -* `master_repl_offset`: TBD -* `second_repl_offset`: TBD -* `repl_backlog_active`: TBD -* `repl_backlog_size`: Size in bytes of the replication backlog -* `repl_backlog_first_byte_offset`: TBD -* `repl_backlog_histlen`: TBD +* `master_replid`: The replication ID of the Redis server, if it is a master +* `master_replid2`: The replication ID of the Redis server's master, if it is + enslaved +* `master_repl_offset`: The server's current replication offset +* `second_repl_offset`: The offset up to which replication IDs are accepted +* `repl_backlog_active`: Flag indicating replication backlog is active +* `repl_backlog_size`: Total size in bytes of the replication backlog buffer +* `repl_backlog_first_byte_offset`: The master offset of the replication + backlog buffer +* `repl_backlog_histlen`: Size in bytes of the data in the replication backlog + buffer If the instance is a slave, these additional fields are provided: @@ -229,9 +247,9 @@ If the instance is a slave, these additional fields are provided: * `master_last_io_seconds_ago`: Number of seconds since the last interaction with master * `master_sync_in_progress`: Indicate the master is syncing to the slave -* `slave_repl_offset`: TBD -* `slave_priority`: TBD -* `slave_read_only`: TBD +* `slave_repl_offset`: The replication offset of the slave instance +* `slave_priority`: The priority of the instance as a candidate for failover +* `slave_read_only`: Flag indicating if the slave is read-only If a SYNC operation is on-going, these additional fields are provided: diff --git a/commands/memory-stats.md b/commands/memory-stats.md index ddc41ac366..17d417a8bf 100644 --- a/commands/memory-stats.md +++ b/commands/memory-stats.md @@ -9,7 +9,7 @@ values. 
The following metrics are reported: * `total.allocated`: Total number of bytes allocated by Redis using its allocator (see `INFO`'s `used_memory`) * `startup.allocated`: Initial amount of memory consumed by Redis at startup - in bytes + in bytes (see `INFO`'s `used_memory_startup`) * `replication.backlog`: Size in bytes of the replication backlog (see `INFO`'s `repl_backlog_size`) * `clients.slaves`: The total size in bytes of all slaves overheads (output @@ -25,13 +25,13 @@ values. The following metrics are reported: * `overhead.total`: The sum of all overheads, i.e. `startup.allocated`, `replication.backlog`, `clients.slaves`, `clients.normal`, `aof.buffer` and those of the internal data structures that are used in managing the - Redis keyspace + Redis keyspace (see `INFO`'s `used_memory_overhead`) * `keys.count`: The total number of keys stored across all databases in the server * `keys.bytes-per-key`: The ratio between **net memory usage** (`total.allocated` minus `startup.allocated`) and `keys.count` -* `dataset.bytes`: The size in bytes of the dataset (`overhead.total` - subtracted from `total.allocated`) +* `dataset.bytes`: The size in bytes of the dataset, i.e. 
`overhead.total` + subtracted from `total.allocated` (see `INFO`'s `used_memory_dataset`) * `dataset.percentage`: The percentage of `dataset.bytes` out of the net memory usage * `peak.percentage`: The percentage of `peak.allocated` out of From 5d936774fb67702ea7ede1ff0f25af00eca3ba7e Mon Sep 17 00:00:00 2001 From: Loris Cro Date: Wed, 2 Aug 2017 16:15:05 +0200 Subject: [PATCH 0786/2314] add kristoff-it/redis-cuckoofilter to modules.json (#855) --- modules.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules.json b/modules.json index e2e9183962..8f1da81990 100644 --- a/modules.json +++ b/modules.json @@ -123,6 +123,15 @@ "description": "Add comment syntax to your redis-cli scripts.", "authors": ["daTokenizer"], "stars": 1 + }, + + { + "name": "redis-cuckoofilter", + "license" : "MIT", + "repository": "https://github.com/kristoff-it/redis-cuckoofilter", + "description": "Hashing-function agnostic Cuckoo filters.", + "authors": ["kristoff-it"], + "stars": 37 } ] From bf2add5b973e7906b200e429dc5727972d9f9e31 Mon Sep 17 00:00:00 2001 From: Marco Cecconi Date: Thu, 3 Aug 2017 10:03:57 +0100 Subject: [PATCH 0787/2314] Update modules.json --- modules.json | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/modules.json b/modules.json index 8f1da81990..882a726884 100644 --- a/modules.json +++ b/modules.json @@ -132,6 +132,14 @@ "description": "Hashing-function agnostic Cuckoo filters.", "authors": ["kristoff-it"], "stars": 37 - } + }, + { + "name": "cthulhu", + "license" : "BSD", + "repository": "https://github.com/sklivvz/cthulhu", + "description": "Extend Redis with JavaScript modules", + "authors": ["sklivvz"], + "stars": 67 + } ] From b53d4ead744aae88d0774ebb2f669602b252cf21 Mon Sep 17 00:00:00 2001 From: antirez Date: Thu, 3 Aug 2017 11:11:11 +0200 Subject: [PATCH 0788/2314] Stars count updated in modules.json. Manually... we need a script ASAP. 
--- modules.json | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/modules.json b/modules.json index 882a726884..aa4a042fad 100644 --- a/modules.json +++ b/modules.json @@ -5,7 +5,7 @@ "repository": "https://github.com/aviggiano/redis-roaring", "description": "Uses the CRoaring library to implement roaring bitmap commands for Redis.", "authors": ["aviggiano"], - "stars": 9 + "stars": 32 }, { @@ -14,7 +14,7 @@ "repository": "https://github.com/brandur/redis-cell", "description": "A Redis module that provides rate limiting in Redis as a single command.", "authors": ["brandur"], - "stars": 164 + "stars": 196 }, { @@ -23,7 +23,7 @@ "repository": "https://github.com/swilly22/redis-module-graph", "description": "A graph database with a Cypher-based querying language", "authors": ["swilly22"], - "stars": 120 + "stars": 170 }, { @@ -32,7 +32,7 @@ "repository": "https://github.com/usmanm/redis-tdigest", "description": "t-digest data structure wich can be used for accurate online accumulation of rank-based statistics such as quantiles and cumulative distribution at a point.", "authors": ["usmanm"], - "stars": 29 + "stars": 36 }, { @@ -41,7 +41,7 @@ "repository": "https://github.com/RedisLabsModules/ReJSON", "description": "A JSON data type for Redis", "authors": ["itamarhaber", "RedisLabs"], - "stars": 271 + "stars": 366 }, { @@ -50,7 +50,7 @@ "repository": "https://github.com/RedisLabsModules/redis-ml", "description": "Machine Learning Model Server", "authors": ["shaynativ", "RedisLabs"], - "stars": 49 + "stars": 99 }, { @@ -59,7 +59,7 @@ "repository": "https://github.com/RedisLabsModules/RediSearch", "description": "Full-Text search over Redis", "authors": ["dvirsky", "RedisLabs"], - "stars": 247 + "stars": 583 }, { @@ -68,7 +68,7 @@ "repository": "https://github.com/RedisLabsModules/topk", "description": "An almost 
deterministic top k elements counter", "authors": ["itamarhaber", "RedisLabs"], - "stars": 15 + "stars": 16 }, { @@ -77,7 +77,7 @@ "repository": "https://github.com/RedisLabsModules/countminsketch", "description": "An apporximate frequency counter", "authors": ["itamarhaber", "RedisLabs"], - "stars": 15 + "stars": 23 }, { @@ -86,7 +86,7 @@ "repository": "https://github.com/RedisLabsModules/rebloom", "description": "Scalable Bloom filters", "authors": ["mnunberg", "RedisLabs"], - "stars": 6 + "stars": 24 }, { @@ -95,7 +95,7 @@ "repository": "https://github.com/antirez/neural-redis", "description": "Online trainable neural networks as Redis data types.", "authors": ["antirez"], - "stars": 1854 + "stars": 1912 }, { @@ -104,7 +104,7 @@ "repository": "https://github.com/danni-m/redis-timeseries", "description": "Time-series data structure for redis", "authors": ["danni-m"], - "stars": 48 + "stars": 58 }, { @@ -113,7 +113,7 @@ "repository": "https://github.com/TamarLabs/ReDe", "description": "Low Latancy timed queues (Dehydrators) as Redis data types.", "authors": ["daTokenizer"], - "stars": 6 + "stars": 9 }, { @@ -122,7 +122,7 @@ "repository": "https://github.com/picotera/commentDis", "description": "Add comment syntax to your redis-cli scripts.", "authors": ["daTokenizer"], - "stars": 1 + "stars": 2 }, { From 14b5deff42ab31b685c0fabab3c53f23956a3066 Mon Sep 17 00:00:00 2001 From: Thulio Ferraz Assis Date: Mon, 21 Aug 2017 03:51:13 -0700 Subject: [PATCH 0789/2314] Added Session Gate module (#859) --- modules.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules.json b/modules.json index aa4a042fad..efc523e128 100644 --- a/modules.json +++ b/modules.json @@ -141,5 +141,14 @@ "description": "Extend Redis with JavaScript modules", "authors": ["sklivvz"], "stars": 67 + }, + + { + "name": "Session Gate", + "license": "MIT", + "repository": 
"https://github.com/f0rmiga/sessiongate", + "description": "Session management with multiple payloads using cryptographically signed tokens.", + "authors": ["f0rmiga"], + "stars": 16 } ] From 39460eac1a4d079f7d938b8e2477e9f0599ecdd8 Mon Sep 17 00:00:00 2001 From: spikefoo Date: Fri, 25 Aug 2017 09:13:52 +0300 Subject: [PATCH 0790/2314] Fix mistake in bitfield.md Make the example command match the explanation. --- commands/bitfield.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/bitfield.md b/commands/bitfield.md index 3141be9fd5..2f3e7b661a 100644 --- a/commands/bitfield.md +++ b/commands/bitfield.md @@ -2,7 +2,7 @@ The command treats a Redis string as a array of bits, and is capable of addressi `BITFIELD` is able to operate with multiple bit fields in the same command call. It takes a list of operations to perform, and returns an array of replies, where each array matches the corresponding operation in the list of arguments. -For example the following command increments an 8 bit signed integer at bit offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0: +For example the following command increments an 5 bit signed integer at bit offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0: > BITFIELD mykey INCRBY i5 100 1 GET u4 0 1) (integer) 1 From 8c996c7d9992ef85be18c7af56009e19ccf41800 Mon Sep 17 00:00:00 2001 From: Nick Craver Date: Thu, 31 Aug 2017 07:53:37 -0400 Subject: [PATCH 0791/2314] Add Redis 4.0 config link in config docs --- topics/config.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/topics/config.md b/topics/config.md index 374b87c02e..6043faed45 100644 --- a/topics/config.md +++ b/topics/config.md @@ -26,8 +26,9 @@ The list of configuration directives, and their meaning and intended usage is available in the self documented example redis.conf shipped into the Redis distribution. 
-* The self documented [redis.conf for Redis 3.0](https://raw.githubusercontent.com/antirez/redis/3.0/redis.conf) -* The self documented [redis.conf for Redis 2.8](https://raw.githubusercontent.com/antirez/redis/2.8/redis.conf) +* The self documented [redis.conf for Redis 4.0](https://raw.githubusercontent.com/antirez/redis/4.0/redis.conf). +* The self documented [redis.conf for Redis 3.0](https://raw.githubusercontent.com/antirez/redis/3.0/redis.conf). +* The self documented [redis.conf for Redis 2.8](https://raw.githubusercontent.com/antirez/redis/2.8/redis.conf). * The self documented [redis.conf for Redis 2.6](https://raw.githubusercontent.com/antirez/redis/2.6/redis.conf). * The self documented [redis.conf for Redis 2.4](https://raw.githubusercontent.com/antirez/redis/2.4/redis.conf). From 0ffcd65d8da01934127b17a083d7e2c7b6d7ec84 Mon Sep 17 00:00:00 2001 From: Nick Craver Date: Thu, 31 Aug 2017 07:58:32 -0400 Subject: [PATCH 0792/2314] Add 3.2 config as well (additions since 3.0) This adds on a link to the 3.2 self-documented config. --- topics/config.md | 1 + 1 file changed, 1 insertion(+) diff --git a/topics/config.md b/topics/config.md index 6043faed45..ba8e0e8479 100644 --- a/topics/config.md +++ b/topics/config.md @@ -27,6 +27,7 @@ is available in the self documented example redis.conf shipped into the Redis distribution. * The self documented [redis.conf for Redis 4.0](https://raw.githubusercontent.com/antirez/redis/4.0/redis.conf). +* The self documented [redis.conf for Redis 3.2](https://raw.githubusercontent.com/antirez/redis/3.2/redis.conf). * The self documented [redis.conf for Redis 3.0](https://raw.githubusercontent.com/antirez/redis/3.0/redis.conf). * The self documented [redis.conf for Redis 2.8](https://raw.githubusercontent.com/antirez/redis/2.8/redis.conf). * The self documented [redis.conf for Redis 2.6](https://raw.githubusercontent.com/antirez/redis/2.6/redis.conf). 
From b37787d40816c7f3925adf3f232c52749292ba63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=83=91=E6=A0=91=E6=96=B0?= Date: Mon, 4 Sep 2017 01:58:02 +0800 Subject: [PATCH 0793/2314] changed acl-redis library's link (#865) * C++ redis client of acl C++ redis client of acl is very efficient and stable, used by many people in their projects. I've rewrited the summary about this C++ redis client. * modify the description about redis client for C++. * merge clients.json from upstream * changed acl-redis library's link to https://github.com/acl-dev/acl/tree/master/lib_acl_cpp/samples/redis. --- clients.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index 399eef5ac8..4a960d60f5 100644 --- a/clients.json +++ b/clients.json @@ -836,9 +836,9 @@ { "name": "acl-redis", "language": "C++", - "url": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/redis", - "repository": "https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", - "description": "Standard C++ Redis Client with high performance and stl-like interface, supporting Redis Cluster, thread-safe", + "url": "https://github.com/acl-dev/acl/tree/master/lib_acl_cpp/samples/redis", + "repository": "https://github.com/acl-dev/acl/tree/master/lib_acl_cpp/include/acl_cpp/redis", + "description": "Standard C++ Redis Client with high performance and stl-like interface, supporting Redis Cluster, thread safety", "authors": ["zhengshuxin"], "active": true }, From 37dedd0583b273350579c6003fcdaec61bb6b5c4 Mon Sep 17 00:00:00 2001 From: AGutan Date: Sun, 3 Sep 2017 20:59:14 +0300 Subject: [PATCH 0794/2314] Update Vert.X client repository. (#864) https://github.com/vert-x/mod-redis is deprecated. 
--- clients.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clients.json b/clients.json index 4a960d60f5..00f4ba9221 100644 --- a/clients.json +++ b/clients.json @@ -269,10 +269,10 @@ }, { - "name": "mod-redis", + "name": "vertx-redis-client", "language": "Java", - "repository": "https://github.com/vert-x/mod-redis", - "description": "Official asynchronous redis.io bus module for Vert.x", + "repository": "https://github.com/vert-x3/vertx-redis-client", + "description": "The Vert.x Redis client provides an asynchronous API to interact with a Redis data-structure server.", "authors": ["pmlopes"] }, From dbed7641efc648be70827ea2dbb481598c6de0ec Mon Sep 17 00:00:00 2001 From: Pavlo Yatsukhnenko Date: Sun, 3 Sep 2017 21:00:24 +0300 Subject: [PATCH 0795/2314] Update client.json (#863) --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 00f4ba9221..a3477f302a 100644 --- a/clients.json +++ b/clients.json @@ -401,7 +401,7 @@ "language": "PHP", "repository": "https://github.com/phpredis/phpredis", "description": "This is a client written in C as a PHP module.", - "authors": ["grumi78", "yowgi"], + "authors": ["grumi78", "yowgi", "yatsukhnenko"], "recommended": true, "active": true }, From 31640aa81acf848aae1a7bf2f116014ac5af2896 Mon Sep 17 00:00:00 2001 From: Sawood Alam Date: Fri, 29 Sep 2017 17:31:34 -0400 Subject: [PATCH 0796/2314] Minor typo correction --- commands/scan.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/scan.md b/commands/scan.md index 940e11811f..00e2d3b129 100644 --- a/commands/scan.md +++ b/commands/scan.md @@ -72,7 +72,7 @@ However there is a way for the user to tune the order of magnitude of the number ## The COUNT option -While `SCAN` does not provide guarantees about the number of elements returned at every iteration, it is possible to empirically adjust the behavior of `SCAN` using the 
**COUNT** option. Basically with COUNT the user specified the *amount of work that should be done at every call in order to retrieve elements from the collection*. This is **just an hint** for the implementation, however generally speaking this is what you could expect most of the times from the implementation. +While `SCAN` does not provide guarantees about the number of elements returned at every iteration, it is possible to empirically adjust the behavior of `SCAN` using the **COUNT** option. Basically with COUNT the user specified the *amount of work that should be done at every call in order to retrieve elements from the collection*. This is **just a hint** for the implementation, however generally speaking this is what you could expect most of the times from the implementation. * The default COUNT value is 10. * When iterating the key space, or a Set, Hash or Sorted Set that is big enough to be represented by a hash table, assuming no **MATCH** option is used, the server will usually return *count* or a bit more than *count* elements per call. From 3d74abc68570015cea85c38647cb58b17d71db7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Gill=C3=A9?= Date: Sat, 7 Oct 2017 16:36:51 +0200 Subject: [PATCH 0797/2314] Remove word (#872) --- topics/persistence.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/topics/persistence.md b/topics/persistence.md index 33b55d269f..ef75f689f8 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -270,8 +270,7 @@ of money to spend we'll review the most interesting disaster recovery techniques that don't have too high costs. * Amazon S3 and other similar services are a good way for mounting your disaster recovery system. Simply transfer your daily or hourly RDB snapshot to S3 in an encrypted form. You can encrypt your data using `gpg -c` (in symmetric encryption mode). 
Make sure to store your password in many different safe places (for instance give a copy to the most important people of your organization). It is recommended to use multiple storage services for improved data safety. -* Transfer your snapshots using SCP (part of SSH) to far servers. This is a fairly simple and safe route: get a small VPS in a place that is very far from you, install ssh there, and generate an ssh client key without passphrase, then make -add it in the authorized_keys file of your small VPS. You are ready to transfer +* Transfer your snapshots using SCP (part of SSH) to far servers. This is a fairly simple and safe route: get a small VPS in a place that is very far from you, install ssh there, and generate an ssh client key without passphrase, then add it in the authorized_keys file of your small VPS. You are ready to transfer backups in an automated fashion. Get at least two VPS in two different providers for best results. From 03d8d0d051340ed5d295c771b4a9b949a97e13bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Gill=C3=A9?= Date: Sat, 7 Oct 2017 22:34:15 +0200 Subject: [PATCH 0798/2314] Fix typo (#873) --- topics/cluster-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/cluster-tutorial.md b/topics/cluster-tutorial.md index 05b869c47a..6f6dac2e07 100644 --- a/topics/cluster-tutorial.md +++ b/topics/cluster-tutorial.md @@ -129,7 +129,7 @@ In our example cluster with nodes A, B, C, if node B fails the cluster is not able to continue, since we no longer have a way to serve hash slots in the range 5501-11000. -However when the cluster is created (or at a latter time) we add a slave +However when the cluster is created (or at a later time) we add a slave node to every master, so that the final cluster is composed of A, B, C that are masters nodes, and A1, B1, C1 that are slaves nodes, the system is able to continue if node B fails. 
From 32ab6e583f167b23368e47deb7e8b70c33ab0f49 Mon Sep 17 00:00:00 2001 From: Yannick Francois Date: Fri, 20 Oct 2017 16:53:28 +0200 Subject: [PATCH 0799/2314] Improve keys examples In order to reduce confusion between values and keys and _key index_ we replace example with one and 1 (and _1)_) with name, age and some stuff around people. --- commands/keys.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/commands/keys.md b/commands/keys.md index 670bce96ac..186caca66c 100644 --- a/commands/keys.md +++ b/commands/keys.md @@ -33,8 +33,8 @@ Use `\` to escape special characters if you want to match them verbatim. @examples ```cli -MSET one 1 two 2 three 3 four 4 -KEYS *o* -KEYS t?? +MSET firstname Jack lastname Stuntman age 35 +KEYS *name* +KEYS a?? KEYS * ``` From 918c3a2344c0b26b6887bd6bc21a53d07e2e02ec Mon Sep 17 00:00:00 2001 From: Simone Mosciatti Date: Wed, 1 Nov 2017 12:55:32 +0100 Subject: [PATCH 0800/2314] add redisql --- modules.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules.json b/modules.json index efc523e128..2c4398a6e3 100644 --- a/modules.json +++ b/modules.json @@ -150,5 +150,14 @@ "description": "Session management with multiple payloads using cryptographically signed tokens.", "authors": ["f0rmiga"], "stars": 16 + }, + + { + "name": "rediSQL", + "license": "AGPL-3.0", + "repository": "https://github.com/RedBeardLab/rediSQL", + "description": "A redis module that provide full SQL capabilities embeding SQLite", + "authors": ["siscia", "RedBeardLab"], + "stars": 409 } ] From 1f5313889fa930c184ce68e49a01c99f33f382c4 Mon Sep 17 00:00:00 2001 From: Simone Mosciatti Date: Wed, 1 Nov 2017 12:57:40 +0100 Subject: [PATCH 0801/2314] fix typo --- modules.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules.json b/modules.json index 2c4398a6e3..d0d0a83d93 100644 --- a/modules.json +++ b/modules.json @@ -156,7 +156,7 @@ "name": "rediSQL", "license": "AGPL-3.0", "repository": 
"https://github.com/RedBeardLab/rediSQL", - "description": "A redis module that provide full SQL capabilities embeding SQLite", + "description": "A redis module that provide full SQL capabilities embedding SQLite", "authors": ["siscia", "RedBeardLab"], "stars": 409 } From 09eff6fb90c6a9538e52a0aa30566fab0dff8633 Mon Sep 17 00:00:00 2001 From: antirez Date: Fri, 3 Nov 2017 22:43:07 +0100 Subject: [PATCH 0802/2314] Remove SWAPDB example from cli block. --- commands/swapdb.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/swapdb.md b/commands/swapdb.md index 1613316f44..ead2db07c0 100644 --- a/commands/swapdb.md +++ b/commands/swapdb.md @@ -12,6 +12,6 @@ This will swap database 0 with database 1. All the clients connected with databa @examples -```cli +``` SWAPDB 0 1 ``` From 356227e9a2e862741c912cfd66e5e4b6936218ce Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Sun, 5 Nov 2017 19:31:56 +0200 Subject: [PATCH 0803/2314] Fixes a typo (#878) --- topics/pipelining.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/pipelining.md b/topics/pipelining.md index a05db1fc81..24c86b33ba 100644 --- a/topics/pipelining.md +++ b/topics/pipelining.md @@ -68,7 +68,7 @@ you can perform per second in a given Redis server. This is the result of the fact that, without using pipelining, serving each command is very cheap from the point of view of accessing the data structures and producing the reply, but it is very costly from the point of view of doing the socket I/O. This -involes calling the `read()` and `write()` syscall, that means going from user +involves calling the `read()` and `write()` syscall, that means going from user land to kernel land. The context switch is a huge speed penalty. 
When pipelining is used, many commands are usually read with a single `read()` From 4232c9e9e15bb01dbe1041c323ca6dd34a23cac8 Mon Sep 17 00:00:00 2001 From: David Szkiba Date: Sun, 5 Nov 2017 18:32:43 +0100 Subject: [PATCH 0804/2314] Update data-types.md (#868) Looks like a typo. --- topics/data-types.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types.md b/topics/data-types.md index 285488fbe2..7b96dcb7e9 100644 --- a/topics/data-types.md +++ b/topics/data-types.md @@ -77,7 +77,7 @@ The max number of members in a set is 2^32 - 1 (4294967295, more than 4 billion You can do many interesting things using Redis Sets, for instance you can: * You can track unique things using Redis Sets. Want to know all the unique IP addresses visiting a given blog post? Simply use [SADD](/commands/sadd) every time you process a page view. You are sure repeated IPs will not be inserted. -* Redis Sets are good to represent relations. You can create a tagging system with Redis using a Set to represent every tag. Then you can add all the IDs of all the objects having a given tag into a Set representing this particular tag, using the [SADD](/commands/sadd) command. Do you want all the IDs of all the Objects having a three different tags at the same time? Just use [SINTER](/commands/sinter). +* Redis Sets are good to represent relations. You can create a tagging system with Redis using a Set to represent every tag. Then you can add all the IDs of all the objects having a given tag into a Set representing this particular tag, using the [SADD](/commands/sadd) command. Do you want all the IDs of all the Objects having three different tags at the same time? Just use [SINTER](/commands/sinter). * You can use Sets to extract elements at random using the [SPOP](/commands/spop) or [SRANDMEMBER](/commands/srandmember) commands. 
From c6c9cbbd05e638f376ce50817346ed5aae3ebf21 Mon Sep 17 00:00:00 2001 From: Mikhail Vasin Date: Thu, 9 Nov 2017 11:13:00 +0300 Subject: [PATCH 0805/2314] Make clear that redis-cli won't UNSUBSCRIBE (#880) It's pretty frustrating to follow the docs, type in UNSUBSCRIBE and see nothing in redis-cli. See https://stackoverflow.com/questions/17621371/redis-unsubscribe --- topics/pubsub.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/topics/pubsub.md b/topics/pubsub.md index f20284cf94..059e644fbb 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -30,6 +30,9 @@ message. The commands that are allowed in the context of a subscribed client are `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, `PUNSUBSCRIBE`, `PING` and `QUIT`. +Please not that `redis-cli` will not accept any commands once in +subscribed mode and can only quit the mode with `Ctrl-C`. + ## Format of pushed messages A message is a @array-reply with three elements. From 4ccaf10902674647c955026400e6ebfcdbaee7a9 Mon Sep 17 00:00:00 2001 From: gu Date: Thu, 9 Nov 2017 09:13:33 +0100 Subject: [PATCH 0806/2314] erroneous twitter account for spade client (#879) Hi @antirez, please accept the PR, my twitter nick was changed, the old one is pointing to someone else. 
Thanks --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index a3477f302a..256e5ea2f1 100644 --- a/clients.json +++ b/clients.json @@ -717,7 +717,7 @@ "language": "Node.js", "repository": "https://github.com/rootslab/spade", "description": "♠ Spade, a full-featured modular client for node.", - "authors": ["rootslab"], + "authors": ["44gtti"], "recommended": false, "active": true }, From 44bbfd0ccc8742da2d5fac7f804527b2d6a1884d Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Thu, 23 Nov 2017 21:29:59 +0200 Subject: [PATCH 0807/2314] Adds the `OBJECT FREQ` subcommand --- commands/object.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/commands/object.md b/commands/object.md index 7820db602f..14c9e613be 100644 --- a/commands/object.md +++ b/commands/object.md @@ -16,7 +16,11 @@ The `OBJECT` command supports multiple sub commands: * `OBJECT IDLETIME ` returns the number of seconds since the object stored at the specified key is idle (not requested by read or write operations). While the value is returned in seconds the actual resolution of this timer is - 10 seconds, but may vary in future implementations. + 10 seconds, but may vary in future implementations. This subcommand is + available when `maxmemory-policy` is set to an LRU policy or `noeviction`. + * `OBJECT FREQ ` returns the inverse logarithmic access frequency counter + of the object stored at the specified key. This subcommand is available when + `maxmemory-policy` is set to an LFU policy. 
Objects can be encoded in different ways: From cdedfccb80c2eb7541c4998ea5966d7e033de52a Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 27 Nov 2017 17:49:01 +0200 Subject: [PATCH 0808/2314] Fixes typo Thanks to @boughtonp for nailing it - https://github.com/antirez/redis-doc/pull/851#pullrequestreview-79207859 --- commands/info.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/info.md b/commands/info.md index fc125f4209..4d6ff88528 100644 --- a/commands/info.md +++ b/commands/info.md @@ -200,7 +200,7 @@ Here is the meaning of all fields in the **stats** section: * `rejected_connections`: Number of connections rejected because of `maxclients` limit * `sync_full`: The number of full resyncs with slaves -* `sync_partial_ok`: The number of accpepted partial resync requests +* `sync_partial_ok`: The number of accepted partial resync requests * `sync_partial_err`: The number of denied partial resync requests * `expired_keys`: Total number of key expiration events * `evicted_keys`: Number of evicted keys due to `maxmemory` limit From dc402c61da3c015e49eb6fd00e4a0564d122d645 Mon Sep 17 00:00:00 2001 From: Itamar Haber Date: Mon, 27 Nov 2017 18:05:14 +0200 Subject: [PATCH 0809/2314] Adds `HELP` subcommand, removes incorrect use of 'inverse' (#884) --- commands/object.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/commands/object.md b/commands/object.md index 14c9e613be..1528510db8 100644 --- a/commands/object.md +++ b/commands/object.md @@ -18,9 +18,10 @@ The `OBJECT` command supports multiple sub commands: While the value is returned in seconds the actual resolution of this timer is 10 seconds, but may vary in future implementations. This subcommand is available when `maxmemory-policy` is set to an LRU policy or `noeviction`. - * `OBJECT FREQ ` returns the inverse logarithmic access frequency counter - of the object stored at the specified key. 
This subcommand is available when - `maxmemory-policy` is set to an LFU policy. + * `OBJECT FREQ ` returns the logarithmic access frequency counter of the + object stored at the specified key. This subcommand is available when + `maxmemory-policy` is set to an LFU policy. + * `OBJECT HELP` returns a succint help text. Objects can be encoded in different ways: From 2d05ed1bf26b583109b9cc626c7538181ae419a4 Mon Sep 17 00:00:00 2001 From: Eugene Ponizovsky Date: Fri, 1 Dec 2017 18:10:54 +0300 Subject: [PATCH 0810/2314] Added Redis Cluster client Redis::ClusterRider for Perl --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index 256e5ea2f1..d494dbe4e6 100644 --- a/clients.json +++ b/clients.json @@ -360,6 +360,16 @@ "active": true }, + { + "name": "Redis::ClusterRider", + "language": "Perl", + "url": "http://search.cpan.org/dist/Redis-ClusterRider/", + "repository": " https://github.com/iph0/Redis-ClusterRider", + "description": "Daring Redis Cluster client", + "authors": ["iph0"], + "active": true + }, + { "name": "AnyEvent::Hiredis", "language": "Perl", From 7238695449a2cabe40d731c56a94140cac0a858e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20H=C3=A4mmerle?= Date: Sun, 17 Dec 2017 21:16:20 +0100 Subject: [PATCH 0811/2314] fix a small typo --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index 15d2ab8f1f..fbf03b4d08 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -954,7 +954,7 @@ is a trivial example of `BITCOUNT` call: > bitcount key (integer) 2 -Common user cases for bitmaps are: +Common use cases for bitmaps are: * Real time analytics of all kinds. * Storing space efficient but high performance boolean information associated with object IDs. 
From f75291ceed98c4e5a85caf993cad0598480dca91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20H=C3=A4mmerle?= Date: Mon, 18 Dec 2017 09:30:01 +0100 Subject: [PATCH 0812/2314] Fix a Small Typo (#891) --- topics/data-types-intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/data-types-intro.md b/topics/data-types-intro.md index fbf03b4d08..7e7431ad9b 100644 --- a/topics/data-types-intro.md +++ b/topics/data-types-intro.md @@ -327,7 +327,7 @@ pop. If we try to pop yet another element, this is the result we get: > rpop mylist (nil) -Redis returned a NULL value to signal that there are no elements into the +Redis returned a NULL value to signal that there are no elements in the list. Common use cases for lists From 69867c03b1747494b549e4fe53aac535e2702fc0 Mon Sep 17 00:00:00 2001 From: Louis Morgan Date: Sat, 23 Dec 2017 11:59:31 +0000 Subject: [PATCH 0813/2314] Fix formatting in OBJECT documentation Remove leading spaces from the documentation for `OBJECT FREQ` and `OBJECT HELP`. These were previously in a nested `