From 73c1143de7701ad8501e729d406dd4c7ba14c964 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ronny=20Lo=CC=81pez?= Date: Tue, 15 Jan 2013 22:57:53 +0100 Subject: [PATCH 0001/2497] Fixed command BITCOUNT arguments. --- commands.json | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/commands.json b/commands.json index 1fc881e944..bff4fdbb2e 100644 --- a/commands.json +++ b/commands.json @@ -45,14 +45,9 @@ "type": "key" }, { - "name": "start", - "type": "integer", - "optional": true - }, - { - "name": "end", - "type": "integer", - "optional": true + "name": ["start", "end"], + "type": ["integer", "integer"], + "multiple": true } ], "since": "2.6.0", From 49da29860dae9730861ab1e1dd8774490fe53546 Mon Sep 17 00:00:00 2001 From: John Weir Date: Thu, 17 Jan 2013 14:49:32 -0500 Subject: [PATCH 0002/2497] Document Pub/Sub and db number scope --- topics/pubsub.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/topics/pubsub.md b/topics/pubsub.md index 9765f47efb..d8772db905 100644 --- a/topics/pubsub.md +++ b/topics/pubsub.md @@ -49,6 +49,16 @@ issued by another client. The second element is the name of the originating channel, and the third argument is the actual message payload. +## Database & Scoping + +Pub/Sub has no relation to the key space. It was made to not interfere with +it on any level, including database numbers. + +Publishing on db 10, will be heard on by a subscriber on db 1. + +If you need scoping of some kind, prefix the channels with the name of the +environment (test, staging, production, ...). 
+ ## Wire protocol example SUBSCRIBE first second From 3e00d2f5c1cda5a712e3d5d331990c108da3fad9 Mon Sep 17 00:00:00 2001 From: george Date: Fri, 8 Feb 2013 23:43:02 +0900 Subject: [PATCH 0003/2497] add examples for set-related store commands --- commands/sdiffstore.md | 13 +++++++++++++ commands/sinterstore.md | 13 +++++++++++++ commands/sunionstore.md | 13 +++++++++++++ 3 files changed, 39 insertions(+) diff --git a/commands/sdiffstore.md b/commands/sdiffstore.md index db95908556..e941016742 100644 --- a/commands/sdiffstore.md +++ b/commands/sdiffstore.md @@ -6,3 +6,16 @@ If `destination` already exists, it is overwritten. @return @integer-reply: the number of elements in the resulting set. + +@examples + +```cli +SADD key1 "a" +SADD key1 "b" +SADD key1 "c" +SADD key2 "c" +SADD key2 "d" +SADD key2 "e" +SDIFFSTORE key key1 key2 +SMEMBERS key +``` diff --git a/commands/sinterstore.md b/commands/sinterstore.md index 26d6e3f381..17dd0bf0b4 100644 --- a/commands/sinterstore.md +++ b/commands/sinterstore.md @@ -6,3 +6,16 @@ If `destination` already exists, it is overwritten. @return @integer-reply: the number of elements in the resulting set. + +@examples + +```cli +SADD key1 "a" +SADD key1 "b" +SADD key1 "c" +SADD key2 "c" +SADD key2 "d" +SADD key2 "e" +SINTERSTORE key key1 key2 +SMEMBERS key +``` diff --git a/commands/sunionstore.md b/commands/sunionstore.md index f3bf959c5d..74df06071f 100644 --- a/commands/sunionstore.md +++ b/commands/sunionstore.md @@ -6,3 +6,16 @@ If `destination` already exists, it is overwritten. @return @integer-reply: the number of elements in the resulting set. 
+ +@examples + +```cli +SADD key1 "a" +SADD key1 "b" +SADD key1 "c" +SADD key2 "c" +SADD key2 "d" +SADD key2 "e" +SINTERSTORE key key1 key2 +SMEMBERS key +``` From ef0e67c09af63dc8b808c620fccff24c0b11e661 Mon Sep 17 00:00:00 2001 From: george Date: Sat, 16 Feb 2013 12:04:44 +0900 Subject: [PATCH 0004/2497] fix typo - s/an hash/a hash --- topics/memory-optimization.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/topics/memory-optimization.md b/topics/memory-optimization.md index 5b5bea9621..e2efeee280 100644 --- a/topics/memory-optimization.md +++ b/topics/memory-optimization.md @@ -45,10 +45,10 @@ where values can just be just strings, that is not just more memory efficient than Redis plain keys but also much more memory efficient than memcached. Let's start with some fact: a few keys use a lot more memory than a single key -containing an hash with a few fields. How is this possible? We use a trick. +containing a hash with a few fields. How is this possible? We use a trick. In theory in order to guarantee that we perform lookups in constant time (also known as O(1) in big O notation) there is the need to use a data structure -with a constant time complexity in the average case, like an hash table. +with a constant time complexity in the average case, like a hash table. But many times hashes contain just a few fields. When hashes are small we can instead just encode them in an O(N) data structure, like a linear @@ -60,7 +60,7 @@ it contains will grow too much (you can configure the limit in redis.conf). This does not work well just from the point of view of time complexity, but also from the point of view of constant times, since a linear array of key value pairs happens to play very well with the CPU cache (it has a better -cache locality than an hash table). +cache locality than a hash table). 
However since hash fields and values are not (always) represented as full featured Redis objects, hash fields can't have an associated time to live @@ -168,7 +168,7 @@ of your keys and values: hash-max-zipmap-value 1024 -Every time an hash will exceed the number of elements or element size specified +Every time a hash will exceed the number of elements or element size specified it will be converted into a real hash table, and the memory saving will be lost. You may ask, why don't you do this implicitly in the normal key space so that From 5a6f0f49eb5ad20ac664355fc6c77820b3b81a83 Mon Sep 17 00:00:00 2001 From: 0x20h Date: Fri, 15 Mar 2013 00:46:32 +0100 Subject: [PATCH 0005/2497] fixed typo --- commands/object.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/object.md b/commands/object.md index c0b9a1f709..87aee77de6 100644 --- a/commands/object.md +++ b/commands/object.md @@ -37,14 +37,14 @@ Objects can be encoded in different ways: sets of any size. All the specially encoded types are automatically converted to the general type -once you perform an operation that makes it no possible for Redis to retain the +once you perform an operation that makes it impossible for Redis to retain the space saving encoding. @return Different return values are used for different subcommands. -* Subcommands `refcount` and `idletime` returns integers. +* Subcommands `refcount` and `idletime` return integers. * Subcommand `encoding` returns a bulk reply. If the object you try to inspect is missing, a null bulk reply is returned. 
From 98438f66a3b71bb208b5455fb2bb3105bd236695 Mon Sep 17 00:00:00 2001 From: Sandeep Shetty Date: Tue, 16 Apr 2013 17:37:09 +0530 Subject: [PATCH 0006/2497] Added phpish/redis --- clients.json | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 327a74c93c..33b7e4da66 100644 --- a/clients.json +++ b/clients.json @@ -362,7 +362,15 @@ "description": "Lightweight, standalone, unit-tested fork of Redisent which wraps phpredis for best performance if available.", "authors": ["colinmollenhour"] }, - + + { + "name": "phpish/redis", + "language": "PHP", + "repository": "https://github.com/phpish/redis", + "description": "Simple Redis client in PHP", + "authors": ["sandeepshetty"] + }, + { "name": "redis-py", "language": "Python", From 9e8163f89085aadf6f6b9be1b351c8c4092e68b8 Mon Sep 17 00:00:00 2001 From: Victor Deryagin Date: Tue, 14 May 2013 11:23:46 +0300 Subject: [PATCH 0007/2497] Fixed typo in partitioning.md --- topics/partitioning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/partitioning.md b/topics/partitioning.md index c21f342036..7d663407f0 100644 --- a/topics/partitioning.md +++ b/topics/partitioning.md @@ -50,7 +50,7 @@ Some features of Redis don't play very well with partitioning: Data store or cache? --- -Partitioning when using Redis ad a data store or cache is conceptually the same, however there is a huge difference. While when Redis is used as a data store you need to be sure that a given key always maps to the same instance, when Redis is used as a cache if a given node is unavailable it is not a big problem if we start using a different node, altering the key-instance map as we wish to improve the *availability* of the system (that is, the ability of the system to reply to our queries). +Partitioning when using Redis as a data store or cache is conceptually the same, however there is a huge difference. 
While when Redis is used as a data store you need to be sure that a given key always maps to the same instance, when Redis is used as a cache if a given node is unavailable it is not a big problem if we start using a different node, altering the key-instance map as we wish to improve the *availability* of the system (that is, the ability of the system to reply to our queries). Consistent hashing implementations are often able to switch to other nodes if the preferred node for a given key is not available. Similarly if you add a new node, part of the new keys will start to be stored on the new node. From 14cc15a04a74c63864b1c96fc83c8936cb4dc04d Mon Sep 17 00:00:00 2001 From: BB Date: Sat, 18 May 2013 08:43:46 +0200 Subject: [PATCH 0008/2497] Added Redis client for Rebol. --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 327a74c93c..c0caf92da5 100644 --- a/clients.json +++ b/clients.json @@ -399,6 +399,14 @@ "active": true }, + { + "name": "prot-redis", + "language": "Rebol", + "repository": "https://github.com/rebolek/prot-redis", + "description": "Redis network scheme for Rebol 3", + "authors": ["rebolek"] + }, + { "name": "scala-redis", "language": "Scala", From 9db568250965da21e47258e1292757414c7f556b Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 4 Jul 2013 14:56:29 -0600 Subject: [PATCH 0009/2497] Swapped MojoX::Redis for Mojo::Redis MojoX::Redis is deprecated in favor of Mojo::Redis --- clients.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/clients.json b/clients.json index 327a74c93c..53b6642e31 100644 --- a/clients.json +++ b/clients.json @@ -291,12 +291,12 @@ }, { - "name": "MojoX::Redis", + "name": "Mojo::Redis", "language": "Perl", - "url": "http://search.cpan.org/dist/MojoX-Redis", - "repository": "https://github.com/und3f/mojox-redis", + "url": "http://search.cpan.org/dist/Mojo-Redis", + "repository": 
"https://github.com/marcusramberg/mojo-redis", "description": "asynchronous Redis client for Mojolicious", - "authors": ["und3f"], + "authors": ["und3f", "marcusramberg", "jhthorsen"], "active": true }, From 7a87240ed0e105906d7005874df0e9142f2aafb2 Mon Sep 17 00:00:00 2001 From: Philipp Klose Date: Fri, 26 Jul 2013 02:16:15 +0200 Subject: [PATCH 0010/2497] Haxe was renamed Haxe was renamed. From "haXe" to "Haxe". --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 327a74c93c..1a7d9e6d33 100644 --- a/clients.json +++ b/clients.json @@ -489,7 +489,7 @@ { "name": "hxneko-redis", - "language": "haXe", + "language": "Haxe", "url": "http://code.google.com/p/hxneko-redis", "repository": "http://code.google.com/p/hxneko-redis/source/browse", "description": "", From ae80f66def21b68498f5c592d974cb3bdc1196fb Mon Sep 17 00:00:00 2001 From: sugelav Date: Sat, 27 Jul 2013 23:25:51 +0530 Subject: [PATCH 0011/2497] Added entry for aredis java client in clients.json. --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 327a74c93c..df24918f70 100644 --- a/clients.json +++ b/clients.json @@ -212,6 +212,14 @@ "active": true }, + { + "name": "aredis", + "language": "Java", + "repository": "http://aredis.sourceforge.net/", + "description": "Asynchronous, pipelined client based on Java 7 NIO Channel API", + "authors": ["msuresh"] + }, + { "name": "redis-lua", "language": "Lua", From 209a76a7270d85a84452b5cfd2580cdc9fe314b1 Mon Sep 17 00:00:00 2001 From: sugelav Date: Sun, 28 Jul 2013 12:57:02 +0530 Subject: [PATCH 0012/2497] Minor change to the description of aredis. 
--- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index df24918f70..00fd367596 100644 --- a/clients.json +++ b/clients.json @@ -216,7 +216,7 @@ "name": "aredis", "language": "Java", "repository": "http://aredis.sourceforge.net/", - "description": "Asynchronous, pipelined client based on Java 7 NIO Channel API", + "description": "Asynchronous, pipelined client based on the Java 7 NIO Channel API", "authors": ["msuresh"] }, From 380858ed6c4ab72c86959626c94887ac462da40f Mon Sep 17 00:00:00 2001 From: sugelav Date: Mon, 29 Jul 2013 22:27:12 +0530 Subject: [PATCH 0013/2497] Blanked out author tag for aredis in clients.json since author msuresh does not have a twitter account. --- clients.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients.json b/clients.json index 00fd367596..63dc9d9ee5 100644 --- a/clients.json +++ b/clients.json @@ -217,7 +217,7 @@ "language": "Java", "repository": "http://aredis.sourceforge.net/", "description": "Asynchronous, pipelined client based on the Java 7 NIO Channel API", - "authors": ["msuresh"] + "authors": [] }, { From 8e6266bb33edba5e81afd765ccdf2ca6bf7cbf63 Mon Sep 17 00:00:00 2001 From: Shawn Milochik Date: Fri, 9 Aug 2013 17:04:35 -0400 Subject: [PATCH 0014/2497] typo & grammar fixes, other minor edits Typos: (quite vs quiet, text vs test) A couple of capitalization fixes. A few small English grammar improvements. --- topics/benchmarks.md | 58 ++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/topics/benchmarks.md b/topics/benchmarks.md index b645f4ea28..e95a3f35a2 100644 --- a/topics/benchmarks.md +++ b/topics/benchmarks.md @@ -27,7 +27,7 @@ The following options are supported: -q Quiet. Just show query/sec values --csv Output in CSV format -l Loop. Run the tests forever - -t Only run the comma separated list of tests. The test + -t Only run the comma-separated list of tests. 
The test names are the same as the ones produced as output. -I Idle mode. Just open N idle connections and wait. @@ -51,7 +51,7 @@ like in the following example: LPUSH: 79239.30 requests per second In the above example we asked to just run test the SET and LPUSH commands, -in quite mode (see the `-q` switch). +in quiet mode (see the `-q` switch). It is also possible to specify the command to benchmark directly like in the following example: @@ -64,11 +64,11 @@ Selecting the size of the key space By default the benchmark runs against a single key. In Redis the difference between such a synthetic benchmark and a real one is not huge since it is an -in memory system, however it is possible to stress cache misses and in general +in-memory system, however it is possible to stress cache misses and in general to simulate a more real-world work load by using a large key space. This is obtained by using the `-r` switch. For instance if I want to run -one million of SET operations, using a random key for every operation out of +one million SET operations, using a random key for every operation out of 100k possible keys, I'll use the following command line: $ redis-cli flushall @@ -110,7 +110,7 @@ pipeling of 16 commands: SET: 403063.28 requests per second GET: 508388.41 requests per second -Using pipelining resulted into a sensible amount of more commands processed. +Using pipelining results in a significant increase in performance. Pitfalls and misconceptions --------------------------- @@ -124,8 +124,8 @@ in account. + Redis is a server: all commands involve network or IPC roundtrips. It is meaningless to compare it to embedded data stores such as SQLite, Berkeley DB, -Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is precisely -dominated by network/protocol management. +Tokyo/Kyoto Cabinet, etc ... because the cost of most operations is +primarily in network/protocol management. + Redis commands return an acknowledgment for all usual commands. 
Some other data stores do not (for instance MongoDB does not implicitly acknowledge write operations). Comparing Redis to stores involving one-way queries is only @@ -136,7 +136,7 @@ you need multiple connections (like redis-benchmark) and/or to use pipelining to aggregate several commands and/or multiple threads or processes. + Redis is an in-memory data store with some optional persistency options. If you plan to compare it to transactional servers (MySQL, PostgreSQL, etc ...), -then you should consider activating AOF and decide of a suitable fsync policy. +then you should consider activating AOF and decide on a suitable fsync policy. + Redis is a single-threaded server. It is not designed to benefit from multiple CPU cores. People are supposed to launch several Redis instances to scale out on several cores if needed. It is not really fair to compare one @@ -184,7 +184,7 @@ memcached (dormando) developers. You can see that in the end, the difference between the two solutions is not so staggering, once all technical aspects are considered. Please note both -Redis and memcached have been optimized further after these benchmarks ... +Redis and memcached have been optimized further after these benchmarks. Finally, when very efficient servers are benchmarked (and stores like Redis or memcached definitely fall in this category), it may be difficult to saturate @@ -198,7 +198,7 @@ Factors impacting Redis performance There are multiple factors having direct consequences on Redis performance. We mention them here, since they can alter the result of any benchmarks. Please note however, that a typical Redis instance running on a low end, -non tuned, box usually provides good enough performance for most applications. +untuned box usually provides good enough performance for most applications. + Network bandwidth and latency usually have a direct impact on the performance. 
It is a good practice to use the ping program to quickly check the latency @@ -207,7 +207,7 @@ Regarding the bandwidth, it is generally useful to estimate the throughput in Gbits/s and compare it to the theoretical bandwidth of the network. For instance a benchmark setting 4 KB strings in Redis at 100000 q/s, would actually consume 3.2 Gbits/s of bandwidth -and probably fit with a 10 GBits/s link, but not a 1 Gbits/s one. In many real +and probably fit within a 10 GBits/s link, but not a 1 Gbits/s one. In many real world scenarios, Redis throughput is limited by the network well before being limited by the CPU. To consolidate several high-throughput Redis instances on a single server, it worth considering putting a 10 Gbits/s NIC @@ -215,24 +215,24 @@ or multiple 1 Gbits/s NICs with TCP/IP bonding. + CPU is another very important factor. Being single-threaded, Redis favors fast CPUs with large caches and not many cores. At this game, Intel CPUs are currently the winners. It is not uncommon to get only half the performance on -an AMD Opteron CPU compared to similar Nehalem EP/Westmere EP/Sandy bridge +an AMD Opteron CPU compared to similar Nehalem EP/Westmere EP/Sandy Bridge Intel CPUs with Redis. When client and server run on the same box, the CPU is the limiting factor with redis-benchmark. + Speed of RAM and memory bandwidth seem less critical for global performance especially for small objects. For large objects (>10 KB), it may become -noticeable though. Usually, it is not really cost effective to buy expensive +noticeable though. Usually, it is not really cost-effective to buy expensive fast memory modules to optimize Redis. -+ Redis runs slower on a VM. Virtualization toll is quite high because ++ Redis runs slower on a VM. The virtualization toll is quite high because for many common operations, Redis does not add much overhead on top of the required system calls and network interruptions. 
Prefer to run Redis on a physical box, especially if you favor deterministic latencies. On a state-of-the-art hypervisor (VMWare), result of redis-benchmark on a VM -through the physical network is almost divided by 2 compared to the +through the physical network is almost cut in half compared to the physical machine, with some significant CPU time spent in system and interruptions. + When the server and client benchmark programs run on the same box, both -the TCP/IP loopback and unix domain sockets can be used. It depends on the -platform, but unix domain sockets can achieve around 50% more throughput than +the TCP/IP loopback and unix domain sockets can be used. Depending on the +platform, unix domain sockets can achieve around 50% more throughput than the TCP/IP loopback (on Linux for instance). The default behavior of redis-benchmark is to use the TCP/IP loopback. + The performance benefit of unix domain sockets compared to TCP/IP loopback @@ -247,7 +247,7 @@ See the graph below. + On multi CPU sockets servers, Redis performance becomes dependant on the NUMA configuration and process location. The most visible effect is that -redis-benchmark results seem non deterministic because client and server +redis-benchmark results seem non-deterministic because client and server processes are distributed randomly on the cores. To get deterministic results, it is required to use process placement tools (on Linux: taskset or numactl). The most efficient combination is always to put the client and server on two @@ -260,7 +260,7 @@ Please note this benchmark is not meant to compare CPU models between themselves ![NUMA chart](https://github.com/dspezia/redis-doc/raw/6374a07f93e867353e5e946c1e39a573dfc83f6c/topics/NUMA_chart.gif) + With high-end configurations, the number of client connections is also an -important factor. Being based on epoll/kqueue, Redis event loop is quite +important factor. 
Being based on epoll/kqueue, the Redis event loop is quite scalable. Redis has already been benchmarked at more than 60000 connections, and was still able to sustain 50000 q/s in these conditions. As a rule of thumb, an instance with 30000 connections can only process half the throughput @@ -278,7 +278,7 @@ Jumbo frames may also provide a performance boost when large objects are used. + Depending on the platform, Redis can be compiled against different memory allocators (libc malloc, jemalloc, tcmalloc), which may have different behaviors in term of raw speed, internal and external fragmentation. -If you did not compile Redis by yourself, you can use the INFO command to check +If you did not compile Redis yourself, you can use the INFO command to check the mem_allocator field. Please note most benchmarks do not run long enough to generate significant external fragmentation (contrary to production Redis instances). @@ -289,7 +289,7 @@ Other things to consider One important goal of any benchmark is to get reproducible results, so they can be compared to the results of other tests. -+ A good practice is to try to run tests on isolated hardware as far as possible. ++ A good practice is to try to run tests on isolated hardware as much as possible. If it is not possible, then the system must be monitored to check the benchmark is not impacted by some external activity. + Some configurations (desktops and laptops for sure, some servers as well) @@ -300,8 +300,8 @@ reproducible results, it is better to set the highest possible fixed frequency for all the CPU cores involved in the benchmark. + An important point is to size the system accordingly to the benchmark. The system must have enough RAM and must not swap. On Linux, do not forget -to set the overcommit_memory parameter correctly. Please note 32 and 64 bits -Redis instances have not the same memory footprint. +to set the overcommit_memory parameter correctly. 
Please note 32 and 64 bit +Redis instances do not have the same memory footprint. + If you plan to use RDB or AOF for your benchmark, please check there is no other I/O activity in the system. Avoid putting RDB or AOF files on NAS or NFS shares, or on any other devices impacting your network bandwidth and/or latency @@ -312,13 +312,13 @@ the generated log file on a remote filesystem. instance using INFO at regular interval to gather statistics is probably fine, but MONITOR will impact the measured performance significantly. -# Benchmark results on different virtualized and bare metal servers. +# Benchmark results on different virtualized and bare-metal servers. * The test was done with 50 simultaneous clients performing 2 million requests. * Redis 2.6.14 is used for all the tests. -* Test executed using the loopback interface. -* Test executed using a key space of 1 million keys. -* Test executed with and without pipelining (16 commands pipeline). +* Test was executed using the loopback interface. +* Test was executed using a key space of 1 million keys. +* Test was executed with and without pipelining (16 commands pipeline). **Intel(R) Xeon(R) CPU E5520 @ 2.27GHz (with pipelining)** @@ -447,7 +447,7 @@ will output the following: LPUSH: 34803.41 requests per second LPOP: 37367.20 requests per second -Another one using a 64 bit box, a Xeon L5420 clocked at 2.5 GHz: +Another one using a 64-bit box, a Xeon L5420 clocked at 2.5 GHz: $ ./redis-benchmark -q -n 100000 PING: 111731.84 requests per second @@ -463,7 +463,7 @@ Another one using a 64 bit box, a Xeon L5420 clocked at 2.5 GHz: * Redis version **2.4.2** * Default number of connections, payload size = 256 * The Linux box is running *SLES10 SP3 2.6.16.60-0.54.5-smp*, CPU is 2 x *Intel X5670 @ 2.93 GHz*. -* Text executed while running redis server and benchmark client on the same CPU, but different cores. +* Test executed while running Redis server and benchmark client on the same CPU, but different cores. 
Using a unix domain socket: From 464e917b5c104d62d30aee601e6f3f91adc10805 Mon Sep 17 00:00:00 2001 From: Amber Jain Date: Thu, 15 Aug 2013 19:16:13 +0530 Subject: [PATCH 0015/2497] fixed typos in http://redis.io/topics/quickstart --- topics/quickstart.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/topics/quickstart.md b/topics/quickstart.md index c0dc6f80b3..5de6b92cf5 100644 --- a/topics/quickstart.md +++ b/topics/quickstart.md @@ -28,19 +28,19 @@ In order to compile Redis follow this simple steps: cd redis-stable make -At this point you can try if your build works correctly typing **make test**, but this is an optional step. After the compilation the **src** directory inside the Redis distribution is populated with the different executables that are part of Redis: +At this point you can try if your build works correctly by typing **make test**, but this is an optional step. After the compilation the **src** directory inside the Redis distribution is populated with the different executables that are part of Redis: * **redis-server** is the Redis Server itself. * **redis-cli** is the command line interface utility to talk with Redis. * **redis-benchmark** is used to check Redis performances. * **redis-check-aof** and **redis-check-dump** are useful in the rare event of corrupted data files. -It is a good idea to copy both the Redis server than the command line interface in proper places using the following commands: +It is a good idea to copy both the Redis server and the command line interface in proper places using the following commands: * sudo cp redis-server /usr/local/bin/ * sudo cp redis-cli /usr/local/bin/ -In the following documentation I assume that /usr/local/bin is in your PATH environment variable so you can execute both the binaries without specifying the full path. 
+In the following documentation I assume that /usr/local/bin is in your PATH environment variable so that you can execute both the binaries without specifying the full path. Starting Redis === @@ -114,7 +114,7 @@ commands calling methods. A short interactive example using Ruby: Redis persistence ================= -You can learn [how Redis persisence works in this page](http://redis.io/topics/persistence), however what is important to understand for a quick start is that by default, if you start Redis with the default configuration, Redis will spontaneously save the dataset only from time to time (for instance after at least five minutes if you have at least 100 changes in your data), so if you want your database to persist and be reloaded after a restart make sure to call the **SAVE** command manually every time you want to force a data set snapshot. Otherwise make sure to shutdown the database using the **SHUTDOWN** command: +You can learn [how Redis persisence works on this page](http://redis.io/topics/persistence), however what is important to understand for a quick start is that by default, if you start Redis with the default configuration, Redis will spontaneously save the dataset only from time to time (for instance after at least five minutes if you have at least 100 changes in your data), so if you want your database to persist and be reloaded after a restart make sure to call the **SAVE** command manually every time you want to force a data set snapshot. Otherwise make sure to shutdown the database using the **SHUTDOWN** command: $ redis-cli shutdown @@ -182,5 +182,5 @@ Make sure that everything is working as expected: * Check that your Redis instance is correctly logging in the log file. * If it's a new machine where you can try it without problems make sure that after a reboot everything is still working. 
-Note: in the above instructions we skipped many Redis configurations parameters that you would like to change, for instance in order to use AOF persistence instead of RDB persistence, or to setup replication, and so forth. +Note: In the above instructions we skipped many Redis configuration parameters that you would like to change, for instance in order to use AOF persistence instead of RDB persistence, or to setup replication, and so forth. Make sure to read the redis.conf file (that is heavily commented) and the other documentation you can find in this web site for more information. From 4424e5354cedd12057d33a809556396ac7bc643b Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:17:40 +0200 Subject: [PATCH 0016/2497] The redhatvm cited article have a known bug The "Understanding Virtual Memory" article cited when motivating the setting for overcommit_memory had the meaning of the values 1 and 2 reversed. I found it while reading this comment http://superuser.com/a/200504. With this commit, I'm trying to make this known to the Redis FAQ reader. The proc(5) man page has it pretty clear: /proc/sys/vm/overcommit_memory This file contains the kernel virtual memory accounting mode. Values are: 0: heuristic overcommit (this is the default) 1: always overcommit, never check 2: always check, never overcommit In mode 0, calls of mmap(2) with MAP_NORESERVE are not checked, and the default check is very weak, leading to the risk of getting a process "OOM-killed". Under Linux 2.4 any nonzero value implies mode 1. In mode 2 (available since Linux 2.6), the total virtual address space on the system is limited to (SS + RAM*(r/100)), where SS is the size of the swap space, and RAM is the size of the physical memory, and r is the contents of the file /proc/sys/vm/overcommit_ratio. 
--- topics/faq.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/topics/faq.md b/topics/faq.md index dfb42a0665..154c1cbf16 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -252,8 +252,12 @@ more optimistic allocation fashion, and this is indeed what you want for Redis. A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. +Beware, this article had 1 and 2 configurtation value for `overcommit_memory` +reversed: reffer to the ["proc(5)"][proc5] man page for the right meaning of the +available values. [redhatvm]: http://www.redhat.com/magazine/001nov04/features/vm/ +[proc5]: http://man7.org/linux/man-pages/man5/proc.5.html ## Are Redis on disk snapshots atomic? From a7a6c8751c6a798713a2d4e35c07fc1141c2c642 Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:23:26 +0200 Subject: [PATCH 0017/2497] Remove " araund proc(5) --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index 154c1cbf16..5ea626080a 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -253,7 +253,7 @@ A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. Beware, this article had 1 and 2 configurtation value for `overcommit_memory` -reversed: reffer to the ["proc(5)"][proc5] man page for the right meaning of the +reversed: reffer to the [proc(5)][proc5] man page for the right meaning of the available values. 
[redhatvm]: http://www.redhat.com/magazine/001nov04/features/vm/ From 0c9c73adbf7d10d3d6768df122dfdb5fe3f86bb3 Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:25:19 +0200 Subject: [PATCH 0018/2497] FIX: typos here and there - configurtation -> configuration - reffer -> refer --- topics/faq.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/topics/faq.md b/topics/faq.md index 5ea626080a..c7294947ec 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -252,8 +252,8 @@ more optimistic allocation fashion, and this is indeed what you want for Redis. A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. -Beware, this article had 1 and 2 configurtation value for `overcommit_memory` -reversed: reffer to the [proc(5)][proc5] man page for the right meaning of the +Beware, this article had 1 and 2 configuration value for `overcommit_memory` +reversed: refer to the [proc(5)][proc5] man page for the right meaning of the available values. [redhatvm]: http://www.redhat.com/magazine/001nov04/features/vm/ From 9ebac39d49f576a4e14ff9a4ffe642fb48aa68ba Mon Sep 17 00:00:00 2001 From: Matteo Centenaro Date: Fri, 23 Aug 2013 18:28:43 +0200 Subject: [PATCH 0019/2497] Format option values as code --- topics/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/faq.md b/topics/faq.md index c7294947ec..9e53d4bde0 100644 --- a/topics/faq.md +++ b/topics/faq.md @@ -252,7 +252,7 @@ more optimistic allocation fashion, and this is indeed what you want for Redis. A good source to understand how Linux Virtual Memory work and other alternatives for `overcommit_memory` and `overcommit_ratio` is this classic from Red Hat Magazine, ["Understanding Virtual Memory"][redhatvm]. 
-Beware, this article had 1 and 2 configuration value for `overcommit_memory` +Beware, this article had `1` and `2` configuration values for `overcommit_memory` reversed: refer to the [proc(5)][proc5] man page for the right meaning of the available values. From 4e1f3f4700417ad6ffe1bae8f25abf5737156ecc Mon Sep 17 00:00:00 2001 From: bmatte Date: Mon, 26 Aug 2013 10:53:37 +0200 Subject: [PATCH 0020/2497] Typos. --- topics/releases.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/releases.md b/topics/releases.md index bdaf616fe9..33ddcbf2b8 100644 --- a/topics/releases.md +++ b/topics/releases.md @@ -62,7 +62,7 @@ Stable releases follow the usual `major.minor.patch` versioning schema, with the * The minor is even in stable versions of Redis. * The minor is odd in unstable, development, frozen, release candidates. For instance the unstable version of 2.8.x will have a version number in the form 2.7.x. In general the unstable version of x.y.z will have a version x.(y-1).z. -* As an unstable version of Redis progresses, the patchlevel is incremented from time to time, so at a given time you may have 2.7.2, and later 2.7.3 and so forth. However when the release candidate state is reached, the patchlevel starts tfrom 101. So for instance 2.7.101 is the first release candidate for 2.8, 2.7.105 is Release Candidate 5, and so forth. +* As an unstable version of Redis progresses, the patchlevel is incremented from time to time, so at a given time you may have 2.7.2, and later 2.7.3 and so forth. However when the release candidate state is reached, the patchlevel starts from 101. So for instance 2.7.101 is the first release candidate for 2.8, 2.7.105 is Release Candidate 5, and so forth. 
Support --- From 7b327da4837ffb5d913c20d90375e1a53425bd5e Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Tue, 1 Oct 2013 22:51:01 -0400 Subject: [PATCH 0021/2497] grammar fixes and rewording in persistence docs --- topics/persistence.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/persistence.md b/topics/persistence.md index 731236e61d..d9710e11fc 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -40,7 +40,7 @@ AOF disadvantages * AOF files are usually bigger than the equivalent RDB files for the same dataset. * AOF can be slower then RDB depending on the exact fsync policy. In general with fsync set to *every second* performances are still very high, and with fsync disabled it should be exactly as fast as RDB even under high load. Still RDB is able to provide more guarantees about the maximum latency even in the case of an huge write load. -* In the past we experienced rare bugs in specific commands (for instance there was one involving blocking commands like BRPOPLPUSH) causing the AOF produced to don't reproduce exactly the same dataset on reloading. This bugs are rare and we have tests in the test suite creating random complex datasets automatically and reloading them to check everything is ok, but this kind of bugs are almost impossible with RDB persistence. To make this point more clear: the Redis AOF works incrementally updating an existing state, like MySQL or MongoDB does, while the RDB snapshotting creates everything from scratch again and again, that is conceptually more robust. However 1) It should be noted that every time the AOF is rewritten by Redis it is recreated from scratch starting from the actual data contained in the data set, making resistance to bugs stronger compared to an always appending AOF file (or one rewritten reading the old AOF instead of reading the data in memory). 2) We never had a single report from users about an AOF corruption that was detected in the real world. 
+* In the past we've experienced rare bugs in specific commands (for instance there was one involving blocking commands like BRPOPLPUSH) causing the AOF to inaccurately reproduce the dataset on recovery. These bugs are rare and are almost impossible with RDB persistence. To make this point more clear: the Redis AOF works by incrementally updating an existing state, like MySQL or MongoDB, while RDB snapshotting is conceptually more robust because it recreates the snapshot from scratch each time. It should be noted that every time the AOF is rewritten it is recreated from scratch using the actual data contained in the dataset and therefore is more robust when compared to a perpetually appending AOF file (or one that is rewritten by reading the old AOF instead of reading the data in memory). To date, there has never been a single report from users about an AOF corruption that was detected in the real world. Ok, so what should I use? --- From bb259e1b7469722ed19741bf11bc59c33b0e801e Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Tue, 1 Oct 2013 23:05:13 -0400 Subject: [PATCH 0022/2497] update readme with parse task dependencies --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 2ee41c8696..d51fb29f99 100644 --- a/README.md +++ b/README.md @@ -111,6 +111,15 @@ You can do this by running Rake inside your working directory. 
$ rake parse ``` +The parse task has the following dependencies: + +* batch +* rdiscount + +``` +gem install batch rdiscount +``` + Additionally, if you have [Aspell][han] installed, you can spell check the documentation: From f10af03633762961af88cecafd8e99b77e502270 Mon Sep 17 00:00:00 2001 From: Alexandre Curreli Date: Thu, 10 Oct 2013 11:41:30 -0400 Subject: [PATCH 0023/2497] Added scredis to Scala clients --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 327a74c93c..1c6ae47be7 100644 --- a/clients.json +++ b/clients.json @@ -669,5 +669,13 @@ "repository": "https://github.com/ctstone/csredis", "description": "Async (and sync) client for Redis and Sentinel", "authors": ["ctnstone"] + }, + + { + "name": "scredis", + "language": "Scala", + "repository": "https://github.com/Livestream/scredis", + "description": "Advanced async (and sync) client entirely written in Scala. Extensively used in production at http://www.livestream.com", + "authors": ["Livestream"] } ] From bc4cf8dd95fc32119d4f4cfb0c72e4301a3a8f3f Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Wed, 23 Oct 2013 11:51:29 -0400 Subject: [PATCH 0024/2497] Add note to restart redis after `vm.overcommit_memory` changes Redis needs to be restarted after a sysctl `vm.overcommit_memory` change to take effect. --- topics/admin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/admin.md b/topics/admin.md index 1baa94adf9..98a2da2fe0 100644 --- a/topics/admin.md +++ b/topics/admin.md @@ -8,7 +8,7 @@ Redis setup hints ----------------- + We suggest deploying Redis using the **Linux operating system**. Redis is also tested heavily on osx, and tested from time to time on FreeBSD and OpenBSD systems. However Linux is where we do all the major stress testing, and where most production deployments are working. -+ Make sure to set the Linux kernel **overcommit memory setting to 1**. 
Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot or run the command `sysctl vm.overcommit_memory=1` for this to take effect immediately. ++ Make sure to set the Linux kernel **overcommit memory setting to 1**. Add `vm.overcommit_memory = 1` to `/etc/sysctl.conf` and then reboot the machine, or run the command `sysctl vm.overcommit_memory=1` and restart Redis for this to take effect immediately. + Make sure to **setup some swap** in your system (we suggest as much as swap as memory). If Linux does not have swap and your Redis instance accidentally consumes too much memory, either Redis will crash for out of memory or the Linux kernel OOM killer will kill the Redis process. + If you are using Redis in a very write-heavy application, while saving an RDB file on disk or rewriting the AOF log **Redis may use up to 2 times the memory normally used**. The additional memory used is proportional to the number of memory pages modified by writes during the saving process, so it is often proportional to the number of keys (or aggregate types items) touched during this time. Make sure to size your memory accordingly. + Even if you have persistence disabled, Redis will need to perform RDB saves if you use replication. From 04a7a2aad4010eea1c33319e5e5959de80c32973 Mon Sep 17 00:00:00 2001 From: Xavier Shay Date: Sun, 3 Nov 2013 06:51:39 -0800 Subject: [PATCH 0025/2497] Minor typo fix in protocol documentation. 
--- topics/protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/topics/protocol.md b/topics/protocol.md index 032e3b921b..44cc02f75f 100644 --- a/topics/protocol.md +++ b/topics/protocol.md @@ -70,7 +70,7 @@ possible to detect the kind of reply from the first byte sent by the server: * In an Error Reply the first byte of the reply is "-" * In an Integer Reply the first byte of the reply is ":" * In a Bulk Reply the first byte of the reply is "$" -* In a Multi Bulk Reply the first byte of the reply s "`*`" +* In a Multi Bulk Reply the first byte of the reply is "`*`" From 8d45b0d13bc1ed18ef704224ada3c7a2d0a4156e Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Sat, 9 Nov 2013 13:54:55 +0100 Subject: [PATCH 0026/2497] Documented included libraries and added examples. --- commands/eval.md | 84 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 2 deletions(-) diff --git a/commands/eval.md b/commands/eval.md index 606ea4a9c6..d00771c127 100644 --- a/commands/eval.md +++ b/commands/eval.md @@ -481,14 +481,94 @@ The Redis Lua interpreter loads the following Lua libraries: * string lib. * math lib. * debug lib. +* struct lib. * cjson lib. * cmsgpack lib. +* redis.sha1hex function. Every Redis instance is _guaranteed_ to have all the above libraries so you can be sure that the environment for your Redis scripts is always the same. -The CJSON library provides extremely fast JSON maniplation within Lua. -All the other libraries are standard Lua libraries. +struct, CJSON and cmsgpack are external libraries, all the other libraries are standard +Lua libraries. + +### struct + +struct is a library for packing/unpacking structures within Lua. 
+ +``` +Valid formats: +> - big endian +< - little endian +![num] - alignment +x - padding +b/B - signed/unsigned byte +h/H - signed/unsigned short +l/L - signed/unsigned long +T - size_t +i/In - signed/unsigned integer with size `n' (default is size of int) +cn - sequence of `n' chars (from/to a string); when packing, n==0 means + the whole string; when unpacking, n==0 means use the previous + read number as the string length +s - zero-terminated string +f - float +d - double +' ' - ignored +``` + + +Example: + +``` +127.0.0.1:6379> eval 'return struct.pack("HH", 1, 2)' 0 +"\x01\x00\x02\x00" +3) (integer) 5 +127.0.0.1:6379> eval 'return {struct.unpack("HH", ARGV[1])}' 0 "\x01\x00\x02\x00" +1) (integer) 1 +2) (integer) 2 +3) (integer) 5 +127.0.0.1:6379> eval 'return struct.size("HH")' 0 +(integer) 4 +``` + +### CJSON + +The CJSON library provides extremely fast JSON manipulation within Lua. + +Example: + +``` +redis 127.0.0.1:6379> eval 'return cjson.encode({["foo"]= "bar"})' 0 +"{\"foo\":\"bar\"}" +redis 127.0.0.1:6379> eval 'return cjson.decode(ARGV[1])["foo"]' 0 "{\"foo\":\"bar\"}" +"bar" +``` + +### cmsgpack + +The cmsgpack library provides simple and fast MessagePack manipulation within Lua. + +Example: + +``` +127.0.0.1:6379> eval 'return cmsgpack.pack({"foo", "bar", "baz"})' 0 +"\x93\xa3foo\xa3bar\xa3baz" +127.0.0.1:6379> eval 'return cmsgpack.unpack(ARGV[1])' 0 "\x93\xa3foo\xa3bar\xa3baz" +1) "foo" +2) "bar" +3) "baz" +``` + +### redis.sha1hex + +Perform the SHA1 of the input string. 
+ +Example: + +``` +127.0.0.1:6379> eval 'return redis.sha1hex(ARGV[1])' 0 "foo" +"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" +``` ## Emitting Redis logs from scripts From a838e74d857ca49a9549cb171c47d11f54fad517 Mon Sep 17 00:00:00 2001 From: Jan-Erik Rediger Date: Tue, 12 Nov 2013 16:56:24 +0100 Subject: [PATCH 0027/2497] Documented that migrate options are only available in 2.8 Fixes antirez/redis#506 --- commands/migrate.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commands/migrate.md b/commands/migrate.md index 775d1ea6fc..cee852393b 100644 --- a/commands/migrate.md +++ b/commands/migrate.md @@ -42,6 +42,8 @@ On success OK is returned. * `COPY` -- Do not remove the key from the local instance. * `REPLACE` -- Replace existing key on the remote instance. +`COPY` and `REPLACE` are added in 2.8 and not available in 2.6 + @return @status-reply: The command returns OK on success. From 62a3bffe65aecce387067d84a704a7a76d21b81e Mon Sep 17 00:00:00 2001 From: Sean Charles Date: Sun, 24 Nov 2013 19:26:47 +0000 Subject: [PATCH 0028/2497] Added GNU Prolog client information --- clients.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clients.json b/clients.json index 327a74c93c..6bdef60f2f 100644 --- a/clients.json +++ b/clients.json @@ -669,5 +669,13 @@ "repository": "https://github.com/ctstone/csredis", "description": "Async (and sync) client for Redis and Sentinel", "authors": ["ctnstone"] + }, + + { + "name": "gnuprolog-redisclient", + "language": "GNU Prolog", + "repository": "https://github.com/emacstheviking/gnuprolog-redisclient", + "description": "Simple Redis client for GNU Prolog in native Prolog, no FFI, libraries etc.", + "authors": ["seancharles"] } ] From e0342264cd24c49bddfe3a1f5dbab7e34856a406 Mon Sep 17 00:00:00 2001 From: Curtis Maloney Date: Tue, 26 Nov 2013 10:20:25 +1100 Subject: [PATCH 0029/2497] Minor grammar corrections --- topics/persistence.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 
deletions(-) diff --git a/topics/persistence.md b/topics/persistence.md index 731236e61d..42cd83ee55 100644 --- a/topics/persistence.md +++ b/topics/persistence.md @@ -39,8 +39,10 @@ AOF disadvantages --- * AOF files are usually bigger than the equivalent RDB files for the same dataset. -* AOF can be slower then RDB depending on the exact fsync policy. In general with fsync set to *every second* performances are still very high, and with fsync disabled it should be exactly as fast as RDB even under high load. Still RDB is able to provide more guarantees about the maximum latency even in the case of an huge write load. -* In the past we experienced rare bugs in specific commands (for instance there was one involving blocking commands like BRPOPLPUSH) causing the AOF produced to don't reproduce exactly the same dataset on reloading. This bugs are rare and we have tests in the test suite creating random complex datasets automatically and reloading them to check everything is ok, but this kind of bugs are almost impossible with RDB persistence. To make this point more clear: the Redis AOF works incrementally updating an existing state, like MySQL or MongoDB does, while the RDB snapshotting creates everything from scratch again and again, that is conceptually more robust. However 1) It should be noted that every time the AOF is rewritten by Redis it is recreated from scratch starting from the actual data contained in the data set, making resistance to bugs stronger compared to an always appending AOF file (or one rewritten reading the old AOF instead of reading the data in memory). 2) We never had a single report from users about an AOF corruption that was detected in the real world. +* AOF can be slower than RDB depending on the exact fsync policy. In general with fsync set to *every second* performances are still very high, and with fsync disabled it should be exactly as fast as RDB even under high load. 
Still RDB is able to provide more guarantees about the maximum latency even in the case of a huge write load. +* In the past we experienced rare bugs in specific commands (for instance there was one involving blocking commands like BRPOPLPUSH) causing the AOF produced to not reproduce exactly the same dataset on reloading. These bugs are rare and we have tests in the test suite creating random complex datasets automatically and reloading them to check everything is ok, but this kind of bugs are almost impossible with RDB persistence. To make this point more clear: the Redis AOF works incrementally updating an existing state, like MySQL or MongoDB does, while the RDB snapshotting creates everything from scratch again and again, that is conceptually more robust. However - + 1) It should be noted that every time the AOF is rewritten by Redis it is recreated from scratch starting from the actual data contained in the data set, making resistance to bugs stronger compared to an always appending AOF file (or one rewritten reading the old AOF instead of reading the data in memory). + 2) We never had a single report from users about an AOF corruption that was detected in the real world. Ok, so what should I use? --- From 50a9f98a8822b4267786d147ff5adadb225179d0 Mon Sep 17 00:00:00 2001 From: Lennie Date: Sun, 15 Dec 2013 10:06:53 +0100 Subject: [PATCH 0030/2497] Update link to example 2.8 redis.conf in config set command documentation Maybe it is time to update the documentation to point to the example redis.conf of the stable (2.8) version. 
--- commands/config set.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/config set.md b/commands/config set.md index fb75449e82..f556b9a3d2 100644 --- a/commands/config set.md +++ b/commands/config set.md @@ -14,7 +14,7 @@ All the supported parameters have the same meaning of the equivalent configuration parameter used in the [redis.conf][hgcarr22rc] file, with the following important differences: -[hgcarr22rc]: http://github.com/antirez/redis/raw/2.2/redis.conf +[hgcarr22rc]: http://github.com/antirez/redis/raw/2.8/redis.conf * Where bytes or other quantities are specified, it is not possible to use the `redis.conf` abbreviated form (10k 2gb ... and so forth), everything From 4a39bc329a6a518a1c9351a55dde112b2b41d037 Mon Sep 17 00:00:00 2001 From: Sasan Rose Date: Mon, 23 Dec 2013 23:09:11 +0330 Subject: [PATCH 0031/2497] Multi-server functionality added to PHPRedMin --- tools.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools.json b/tools.json index c85a0e6d4e..f8b5e88d00 100644 --- a/tools.json +++ b/tools.json @@ -279,7 +279,7 @@ "name": "PHPRedMin", "language": "PHP", "repository": "https://github.com/sasanrose/phpredmin", - "description": "Yet another web interface for Redis", + "description": "Yet another web interface for Redis with multi-server support", "authors": ["sasanrose"] }, { From 3e2df64b262e75c8d29cd4bd8df3f26dadf33e9a Mon Sep 17 00:00:00 2001 From: Michael Neumann Date: Sat, 4 Jan 2014 11:39:54 +0100 Subject: [PATCH 0032/2497] Add rust-redis client for Rust language --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index e18661b340..2c6e44cca2 100644 --- a/clients.json +++ b/clients.json @@ -686,5 +686,14 @@ "repository": "https://github.com/chrisdinn/brando", "description": "A Redis client written with the Akka IO package introduced in Akka 2.2.", "authors": ["chrisdinn"] + }, + + { 
+ "name": "rust-redis", + "language": "Rust", + "repository": "https://github.com/mneumann/rust-redis", + "description": "A Rust client library for Redis.", + "authors": ["mneumann"], + "active": true } ] From 4953dcd08081e320eeaf9f8b71402c2b9f108c64 Mon Sep 17 00:00:00 2001 From: Carlos Nieto Date: Sun, 5 Jan 2014 13:02:19 -0600 Subject: [PATCH 0033/2497] Updating description and url. --- clients.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clients.json b/clients.json index e18661b340..f2f9f29ad1 100644 --- a/clients.json +++ b/clients.json @@ -141,7 +141,8 @@ "name": "gosexy/redis", "language": "Go", "repository": "https://github.com/gosexy/redis", - "description": "Go bindings for the official C redis client (hiredis), supports the whole command set of redis 2.6.10 and subscriptions with go channels.", + "url": "https://menteslibres.net/gosexy/redis", + "description": "A Go client for redis built on top of the hiredis C client. Supports non-blocking connections and channel-based subscriptions.", "authors": ["xiam"], "active": true }, @@ -671,7 +672,7 @@ "description": "Thread-safe client supporting async usage and key/value codecs", "authors": ["ar3te"] }, - + { "name": "csredis", "language": "C#", From 54997964be3219b396b787ad11b07ecee00bb28d Mon Sep 17 00:00:00 2001 From: Austin McKinley Date: Tue, 7 Jan 2014 12:26:31 -0800 Subject: [PATCH 0034/2497] fixing doc typos --- topics/replication.md | 82 +++++++++++++++++++++---------------------- 1 file changed, 40 insertions(+), 42 deletions(-) diff --git a/topics/replication.md b/topics/replication.md index 3191fc7c5e..5b70a2e1d8 100644 --- a/topics/replication.md +++ b/topics/replication.md @@ -6,46 +6,47 @@ replication that allows slave Redis servers to be exact copies of master servers. The following are some very important facts about Redis replication: -* Redis uses asynchronous replication. 
Starting with Redis 2.8 there is however a periodic (one time every second) acknowledge of the replication stream processed by slaves. +* Redis uses asynchronous replication. Starting with Redis 2.8, however, slaves +will periodically acknowledge the replication stream. * A master can have multiple slaves. -* Slaves are able to accept other slaves connections. Aside from +* Slaves are able to accept connections from other slaves. Aside from connecting a number of slaves to the same master, slaves can also be connected to other slaves in a graph-like structure. -* Redis replication is non-blocking on the master side, this means that -the master will continue to serve queries when one or more slaves perform -the first synchronization. +* Redis replication is non-blocking on the master side. This means that +the master will continue to handle queries when one or more slaves perform +the initial synchronization. -* Replication is non blocking on the slave side: while the slave is performing -the first synchronization it can reply to queries using the old version of -the data set, assuming you configured Redis to do so in redis.conf. -Otherwise you can configure Redis slaves to send clients an error if the -link with the master is down. However there is a moment where the old dataset must be deleted and the new one must be loaded by the slave where it will block incoming connections. +* Replication is also non-blocking on the slave side. While the slave is performing +the initial synchronization, it can handle queries using the old version of +the dataset, assuming you configured Redis to do so in redis.conf. +Otherwise, you can configure Redis slaves to return an error to clients if the +replication stream is down. However, after the initial sync, the old dataset +must be deleted and the new one must be loaded. The slave will block incoming +connections during this brief window. 
-* Replications can be used both for scalability, in order to have +* Replication can be used both for scalability, in order to have multiple slaves for read-only queries (for example, heavy `SORT` -operations can be offloaded to slaves, or simply for data redundancy. +operations can be offloaded to slaves), or simply for data redundancy. -* It is possible to use replication to avoid the saving process on the -master side: just configure your master redis.conf to avoid saving -(just comment all the "save" directives), then connect a slave +* It is possible to use replication to avoid the cost of having the master +write the full dataset to disk: just configure your master redis.conf to avoid +saving (just comment all the "save" directives), then connect a slave configured to save from time to time. How Redis replication works --- -If you set up a slave, upon connection it sends a SYNC command. And -it doesn't matter if it's the first time it has connected or if it's -a reconnection. +If you set up a slave, upon connection it sends a SYNC command. It doesn't +matter if it's the first time it has connected or if it's a reconnection. -The master then starts background saving, and collects all new +The master then starts background saving, and starts to buffer all new commands received that will modify the dataset. When the background saving is complete, the master transfers the database file to the slave, which saves it on disk, and then loads it into memory. The master will -then send to the slave all accumulated commands, and all new commands -received from clients that will modify the dataset. This is done as a +then send to the slave all buffered commands. This is done as a stream of commands and is in the same format of the Redis protocol itself. You can try it yourself via telnet. Connect to the Redis port while the
When a master and a slave reconnects after the link went down, a full resync -is always performed. However starting with Redis 2.8, a partial resynchronization +is always performed. However, starting with Redis 2.8, a partial resynchronization is also possible. Partial resynchronization @@ -69,20 +70,17 @@ Starting with Redis 2.8, master and slave are usually able to continue the replication process without requiring a full resynchronization after the replication link went down. -This works using an in-memory backlog of the replication stream in the -master side. Also the master and all the slaves agree on a *replication +This works by creating an in-memory backlog of the replication stream on the +master side. The master and all the slaves agree on a *replication offset* and a *master run id*, so when the link goes down, the slave will -reconnect and ask the master to continue the replication, assuming the +reconnect and ask the master to continue the replication. Assuming the master run id is still the same, and that the offset specified is available -in the replication backlog. - -If the conditions are met, the master just sends the part of the replication -stream the master missed, and the replication continues. -Otherwise a full resynchronization is performed as in the past versions of -Redis. +in the replication backlog, replication will resume from the point where it left off. +If either of these conditions are unmet, a full resynchronization is performed +(which is the normal pre-2.8 behavior). The new partial resynchronization feature uses the `PSYNC` command internally, -while the old implementation used the `SYNC` command, however a Redis 2.8 +while the old implementation uses the `SYNC` command. Note that a Redis 2.8 slave is able to detect if the server it is talking with does not support `PSYNC`, and will use `SYNC` instead. @@ -98,19 +96,19 @@ Of course you need to replace 192.168.1.1 6379 with your master IP address (or hostname) and port. 
Alternatively, you can call the `SLAVEOF` command and the master host will start a sync with the slave. -There are also a few parameters in order to tune the replication backlog taken +There are also a few parameters for tuning the replication backlog taken in memory by the master to perform the partial resynchronization. See the example `redis.conf` shipped with the Redis distribution for more information. -Read only slave +Read-only slave --- -Since Redis 2.6 slaves support a read-only mode that is enabled by default. +Since Redis 2.6, slaves support a read-only mode that is enabled by default. This behavior is controlled by the `slave-read-only` option in the redis.conf file, and can be enabled and disabled at runtime using `CONFIG SET`. -Read only slaves will reject all the write commands, so that it is not possible to write to a slave because of a mistake. This does not mean that the feature is conceived to expose a slave instance to the internet or more generally to a network where untrusted clients exist, because administrative commands like `DEBUG` or `CONFIG` are still enabled. However security of read-only instances can be improved disabling commands in redis.conf using the `rename-command` directive. +Read-only slaves will reject all write commands, so that it is not possible to write to a slave because of a mistake. This does not mean that the feature is intended to expose a slave instance to the internet or more generally to a network where untrusted clients exist, because administrative commands like `DEBUG` or `CONFIG` are still enabled. However, security of read-only instances can be improved by disabling commands in redis.conf using the `rename-command` directive. -You may wonder why it is possible to revert the default and have slave instances that can be target of write operations. 
The reason is that while this writes will be discarded if the slave and the master will resynchronize, or if the slave is restarted, often there is ephemeral data that is unimportant that can be stored into slaves. For instance clients may take information about reachability of master in the slave instance to coordinate a fail over strategy. +You may wonder why it is possible to revert the read-only setting and have slave instances that can be target of write operations. The reason is that these writes will be discarded if the slave and the master resynchronize, or if the slave is restarted. Often there is ephemeral data that is unimportant that can be stored on read-only slaves. For instance, clients may take information about master reachability to coordinate a failover strategy. Setting a slave to authenticate to a master --- @@ -129,12 +127,12 @@ To set it permanently, add this to your config file: Allow writes only with N attached replicas --- -Starting with Redis 2.8 it is possible to configure a Redis master in order to +Starting with Redis 2.8, it is possible to configure a Redis master to accept write queries only if at least N slaves are currently connected to the -master, in order to improve data safety. +master. -However because Redis uses asynchronous replication it is not possible to ensure -the write actually received a given write, so there is always a window for data +However, because Redis uses asynchronous replication it is not possible to ensure +the slave actually received a given write, so there is always a window for data loss. This is how the feature works: @@ -154,5 +152,5 @@ There are two configuration parameters for this feature: * min-slaves-to-write `` * min-slaves-max-lag `` -For more information please check the example `redis.conf` file shipped with the +For more information, please check the example `redis.conf` file shipped with the Redis source distribution. 
From 17924196017b266d98332d6477b0e3b70c521a4c Mon Sep 17 00:00:00 2001 From: xuyu Date: Wed, 8 Jan 2014 16:22:34 +0800 Subject: [PATCH 0035/2497] Update clients.json add a new redis client for golang --- clients.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients.json b/clients.json index e18661b340..22bc868b68 100644 --- a/clients.json +++ b/clients.json @@ -145,6 +145,15 @@ "authors": ["xiam"], "active": true }, + + { + "name": "goredis", + "language": "Go", + "repository": "https://github.com/xuyu/goredis", + "description": "A redis client for golang with full features", + "authors": ["xuyu"], + "active": true + }, { "name": "hedis", From c4b896b57a7feb4ecd7c387bf99f7650f741ccb3 Mon Sep 17 00:00:00 2001 From: Jon Forrest Date: Thu, 9 Jan 2014 16:55:50 -0800 Subject: [PATCH 0036/2497] Initial changes. Nothing major. --- topics/twitter-clone.md | 78 ++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/topics/twitter-clone.md b/topics/twitter-clone.md index e21e189f11..194b1084f0 100644 --- a/topics/twitter-clone.md +++ b/topics/twitter-clone.md @@ -1,36 +1,36 @@ A case study: Design and implementation of a simple Twitter clone using only the Redis key-value store as database and PHP === -In this article I'll explain the design and the implementation of a [simple clone of Twitter](http://retwis.antirez.com) written using PHP and Redis as only database. The programming community uses to look at key-value stores like special databases that can't be used as drop in replacement for a relational database for the development of web applications. This article will try to prove the contrary. +In this article I'll describe the design and the implementation of a [simple clone of Twitter](http://retwis.antirez.com) written using PHP with Redis as the only database. 
The programming community traditionally considered key-value stores as special databases that couldn't be used as drop in replacements for a relational database for the development of web applications. This article will try to correct this impression. -Our Twitter clone, [called Retwis](http://retwis.antirez.com), is structurally simple, has very good performance, and can be distributed among N web servers and M Redis servers with very little effort. You can find the source code [here](http://code.google.com/p/redis/downloads/list). +Our Twitter clone, called [Retwis](http://retwis.antirez.com), is structurally simple, has very good performance, and can be distributed among any number of web and Redis servers with very little effort. You can find the source code [here](http://code.google.com/p/redis/downloads/list). -We use PHP for the example since it can be read by everybody. The same (or... much better) results can be obtained using Ruby, Python, Erlang, and so on. +I use PHP for the example since it can be read by everybody. The same (or... much better) results can be obtained using Ruby, Python, Erlang, and so on. **Note:** [Retwis-RB](http://retwisrb.danlucraft.com/) is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft! With full source code included of -course, the Git repository is linked in the footer of the web page. The rest -of this article targets PHP, but Ruby programmers can also check the other -source code, it conceptually very similar. +course, a link to its Git repository appears in the footer of this article. The rest +of this article targets PHP, but Ruby programmers can also check the Retwis-RB +source code since it's conceptually very similar. **Note:** [Retwis-J](http://retwisj.cloudfoundry.com/) is a port of Retwis to -Java, using the Spring Data Framework, written by [Costin Leau](http://twitter.com/costinl). The source code +Java, using the Spring Data Framework, written by [Costin Leau](http://twitter.com/costinl). 
Its source code can be found on -[GitHub](https://github.com/SpringSource/spring-data-keyvalue-examples) and +[GitHub](https://github.com/SpringSource/spring-data-keyvalue-examples), and there is comprehensive documentation available at [springsource.org](http://j.mp/eo6z6I). -Key-value stores basics +Key-value store basics --- -The essence of a key-value store is the ability to store some data, called _value_, inside a key. This data can later be retrieved only if we know the exact key used to store it. There is no way to search something by value. In a sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. So for example I can use the command SET to store the value *bar* at key *foo*: +The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the exact key it was stored in. There is no way to search for something by value. In a sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. 
So, for example, I can use the command SET to store the value *bar* in the key *foo*: SET foo bar -Redis will store our data permanently, so we can later ask for "_What is the value stored at key foo?_" and Redis will reply with *bar*: +Redis stores data permanently, so if I later ask "_What is the value stored in key foo?_" Redis will reply with *bar*: GET foo => bar -Other common operations provided by key-value stores are DEL used to delete a given key, and the associated value, SET-if-not-exists (called SETNX on Redis) that sets a key only if it does not already exist, and INCR that is able to atomically increment a number stored at a given key: +Other common operations provided by key-value stores are DEL, to delete a given key and its associated value, SET-if-not-exists (called SETNX on Redis), to assign a value to a key only if the key does not already exist, and INCR, to atomically increment a number stored in a given key: SET foo 10 INCR foo => 11 @@ -40,13 +40,13 @@ Other common operations provided by key-value stores are DEL used to delete a gi Atomic operations --- -So far it should be pretty simple, but there is something special about INCR. Think about this, why to provide such an operation if we can do it ourselves with a bit of code? After all it is as simple as: +There is something special about INCR. Think about why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: x = GET foo x = x + 1 SET foo x -The problem is that doing the increment this way will work as long as there is only a client working with the value _x_ at a time. See what happens if two computers are accessing this data at the same time: +The problem is that incrementing this way will work as long as there is only one client working with the key _foo_ at one time. 
See what happens if two clients are accessing this key at the same time: x = GET foo (yields 10) y = GET foo (yields 10) @@ -55,34 +55,34 @@ The problem is that doing the increment this way will work as long as there is o SET foo x (foo is now 11) SET foo y (foo is now 11) -Something is wrong with that! We incremented the value two times, but instead to go from 10 to 12 our key holds 11. This is because the INCR operation done with `GET / increment / SET` *is not an atomic operation*. Instead the INCR provided by Redis, Memcached, ..., are atomic implementations, the server will take care to protect the get-increment-set for all the time needed to complete in order to prevent simultaneous accesses. +Something is wrong! We incremented the value two times, but instead of going from 10 to 12, our key holds 11. This is because the increment done with `GET / increment / SET` *is not an atomic operation*. Instead the INCR provided by Redis, Memcached, ..., are atomic implementations, and the server will take care of protecting the key for all the time needed to complete the increment in order to prevent simultaneous accesses. -What makes Redis different from other key-value stores is that it provides more operations similar to INCR that can be used together to model complex problems. This is why you can use Redis to write whole web applications without using an SQL database and without going crazy. +What makes Redis different from other key-value stores is that it provides other operations similar to INCR that can be used to model complex problems. This is why you can use Redis to write whole web applications without using an SQL database and without going crazy. Beyond key-value stores --- -In this section we will see what Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. 
Redis supports Lists and Sets as values, and there are atomic operations to operate against this more advanced values so we are safe even with multiple accesses against the same key. Let's start from Lists: +In this section we will see which Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. Redis supports Lists and Sets as values, and there are atomic operations to operate on them so we are safe even with multiple accesses of the same key. Let's start with Lists: LPUSH mylist a (now mylist holds one element list 'a') LPUSH mylist b (now mylist holds 'b,a') LPUSH mylist c (now mylist holds 'c,b,a') -LPUSH means _Left Push_, that is, add an element to the left (or to the head) of the list stored at _mylist_. If the key _mylist_ does not exist it is automatically created by Redis as an empty list before the PUSH operation. As you can imagine, there is also the RPUSH operation that adds the element on the right of the list (on the tail). +LPUSH means _Left Push_, that is, add an element to the left (or to the head) of the list stored in _mylist_. If the key _mylist_ does not exist it is automatically created by Redis as an empty list before the PUSH operation. As you can imagine, there is also an RPUSH operation that adds the element to the right of the list (on the tail). -This is very useful for our Twitter clone. Updates of users can be stored into a list stored at `username:updates` for instance. There are operations to get data or information from Lists of course. For instance LRANGE returns a range of the list, or the whole list. +This is very useful for our Twitter clone. User updates can be added to a list stored in `username:updates`, for instance. There are operations to get data from Lists, of course. For instance, LRANGE returns a range of the list, or the whole list. LRANGE mylist 0 1 => c,b -LRANGE uses zero-based indexes, that is the first element is 0, the second 1, and so on. 
The command arguments are `LRANGE key first-index last-index`. The _last index_ argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So in order to get the whole list we can use: +LRANGE uses zero-based indexes, that is the first element is 0, the second 1, and so on. The command arguments are `LRANGE key first-index last-index`. The _last-index_ argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So in order to get the whole list we can use: LRANGE mylist 0 -1 => c,b,a -Other important operations are LLEN that returns the length of the list, and LTRIM that is like LRANGE but instead of returning the specified range *trims* the list, so it is like _Get range from mylist, Set this range as new value_ but atomic. We will use only this List operations, but make sure to check the [Redis documentation](http://code.google.com/p/redis/wiki/README) to discover all the List operations supported by Redis. +Other important operations are LLEN that returns the length of the list, and LTRIM that is like LRANGE but instead of returning the specified range *trims* the list, so it is like _Get range from mylist, Set this range as new value_ but atomically. We will use only these List operations, but make sure to check the [Redis documentation](http://code.google.com/p/redis/wiki/README) to discover all the List operations supported by Redis. The set data type --- -There is more than Lists, Redis also supports Sets, that are unsorted collection of elements. It is possible to add, remove, and test for existence of members, and perform intersection between different Sets. Of course it is possible to ask for the list or the number of elements of a Set. Some example will make it more clear. 
Keep in mind that SADD is the _add to set_ operation, SREM is the _remove from set_ operation, _sismember_ is the _test if it is a member_ operation, and SINTER is _perform intersection_ operation. Other operations are SCARD that is used to get the cardinality (the number of elements) of a Set, and SMEMBERS that will return all the members of a Set. +There is more than Lists. Redis also supports Sets, which are unsorted collection of elements. It is possible to add, remove, and test for existence of members, and perform intersection between different Sets. Of course it is possible to ask for the list or the number of elements of a Set. Some example will make it more clear. Keep in mind that SADD is the _add to set_ operation, SREM is the _remove from set_ operation, _sismember_ is the _test if it is a member_ operation, and SINTER is _perform intersection_ operation. Other operations are SCARD that is used to get the cardinality (the number of elements) of a Set, and SMEMBERS that will return all the members of a Set. SADD myset a SADD myset b @@ -103,21 +103,21 @@ SINTER can return the intersection between Sets but it is not limited to two set SISMEMBER myset foo => 1 SISMEMBER myset notamember => 0 -Okay, I think we are ready to start coding! +Okay, we are ready to start coding! Prerequisites --- -If you didn't download it already please grab the [source code of Retwis](http://code.google.com/p/redis/downloads/list). It's a simple tar.gz file with a few of PHP files inside. The implementation is very simple. You will find the PHP library client inside (redis.php) that is used to talk with the Redis server from PHP. This library was written by [Ludovico Magnocavallo](http://qix.it) and you are free to reuse this in your own projects, but for updated version of the library please download the Redis distribution. (Note: there are now better PHP libraries available, check our [clients page](/clients). 
+If you haven't downloaded the [Retwis source code](http://code.google.com/p/redis/downloads/list) already please grab it now. It's a simple tar.gz file containing a few PHP files. The implementation is very simple. You will find the PHP library client inside (redis.php) that is used to talk with the Redis server from PHP. This library was written by [Ludovico Magnocavallo](http://qix.it) and you are free to reuse this in your own projects, but for an updated version of the library please download the Redis distribution. (Note: there are now better PHP libraries available, check our [clients page](/clients). -Another thing you probably want is a working Redis server. Just get the source, compile with make, and run with ./redis-server and you are done. No configuration is required at all in order to play with it or to run Retwis in your computer. +Another thing you probably want is a working Redis server. Just get the source, build with make, run with ./redis-server and you're done. No configuration is required at all in order to play with or run Retwis in your computer. Data layout --- -Working with a relational database this is the stage were the database layout should be produced in form of tables, indexes, and so on. We don't have tables, so what should be designed? We need to identify what keys are needed to represent our objects and what kind of values this keys need to hold. +When working with a relational database, this is when the database schema should be designed so that we'd know the tables, indexes, and so on that the database will contain. We don't have tables, so what should be designed? We need to identify what keys are needed to represent our objects and what kind of values this keys need to hold. -Let's start from Users. We need to represent this users of course, with the username, userid, password, followers and following users, and so on. The first question is, what should identify a user inside our system? 
The username can be a good idea since it is unique, but it is also too big, and we want to stay low on memory. So like if our DB was a relational one we can associate an unique ID to every user. Every other reference to this user will be done by id. That's very simple to do, because we have our atomic INCR operation! When we create a new user we can do something like this, assuming the user is called "antirez": +Let's start with Users. We need to represent the users, of course, with their username, userid, password, followers, following users, and so on. The first question is, how should we identify a user? The username can be a good idea since it is unique, but it is also too big, and we want to stay low on memory. So like if our DB was a relational one we can associate an unique ID to every user. Every other reference to this user will be done by id. That's very simple to do, because we have our atomic INCR operation! When we create a new user we can do something like this, assuming the user is called "antirez": INCR global:nextUserId => 1000 SET uid:1000:username antirez @@ -130,10 +130,10 @@ Besides the fields already defined, we need some more stuff in order to fully de This may appear strange at first, but remember that we are only able to access data by key! It's not possible to tell Redis to return the key that holds a specific value. This is also *our strength*, this new paradigm is forcing us to organize the data so that everything is accessible by _primary key_, speaking with relational DBs language. -Following, followers and updates +Following, followers, and updates --- -There is another central need in our system. Every user has followers users and following users. We have a perfect data structure for this work! That is... Sets. So let's add this two new fields to our schema: +There is another central need in our system. Every user has users that they follow and users who follow them. We have a perfect data structure for this! That is... Sets. 
So let's add these two new fields to our schema: uid:1000:followers => Set of uids of all the followers users uid:1000:following => Set of uids of all the following users @@ -215,9 +215,9 @@ The code is simpler than the description, possibly: return true; } -`loadUserInfo` as separated function is an overkill for our application, but it's a good template for a complex application. The only thing it's missing from all the authentication is the logout. What we do on logout? That's simple, we'll just change the random string in uid:1000:auth, remove the old auth:`` and add a new auth:``. +`loadUserInfo` as a separate function is overkill for our application, but it's a good approach in a complex application. The only thing that's missing from all the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in uid:1000:auth, remove the old auth:`` and add a new auth:``. -*Important:* the logout procedure explains why we don't just authenticate the user after the lookup of auth:``, but double check it against uid:1000:auth. The true authentication string is the latter, the auth:`` is just an authentication key that may even be volatile, or if there are bugs in the program or a script gets interrupted we may even end with multiple auth:`` keys pointing to the same user id. The logout code is the following (logout.php): +*Important:* the logout procedure explains why we don't just authenticate the user after looking up auth:``, but double check it against uid:1000:auth. The true authentication string is the latter, the auth:`` is just an authentication key that may even be volatile, or if there are bugs in the program or a script gets interrupted we may even end with multiple auth:`` keys pointing to the same user id. The logout code is the following (logout.php): include("retwis.php"); @@ -242,12 +242,12 @@ That is just what we described and should be simple to understand. 
Updates --- -Updates, also known as posts, are even simpler. In order to create a new post on the database we do something like this: +Updates, also known as posts, are even simpler. In order to create a new post in the database we do something like this: INCR global:nextPostId => 10343 SET post:10343 "$owner_id|$time|I'm having fun with Retwis" -As you can see the user id and time of the post are stored directly inside the string, we don't need to lookup by time or user id in the example application so it is better to compact everything inside the post string. +As you can see, the user id and time of the post are stored directly inside the string, so we don't need to lookup by time or user id in the example application so it is better to compact everything inside the post string. After we create a post we obtain the post id. We need to LPUSH this post id in every user that's following the author of the post, and of course in the list of posts of the author. This is the file update.php that shows how this is performed: @@ -277,14 +277,14 @@ After we create a post we obtain the post id. We need to LPUSH this post id in e header("Location: index.php"); -The core of the function is the `foreach`. We get using SMEMBERS all the followers of the current user, then the loop will LPUSH the post against the uid:``:posts of every follower. +The core of the function is the `foreach` loop. We get using SMEMBERS all the followers of the current user, then the loop will LPUSH the post against the uid:``:posts of every follower. -Note that we also maintain a timeline with all the posts. In order to do so what is needed is just to LPUSH the post against global:timeline. Let's face it, do you start thinking it was a bit strange to have to sort things added in chronological order using ORDER BY with SQL? I think so indeed. +Note that we also maintain a timeline for all the posts. This requires just LPUSHing the post against global:timeline. 
Let's face it, do you start thinking it was a bit strange to have to sort things added in chronological order using ORDER BY with SQL? I think so indeed. Paginating updates --- -Now it should be pretty clear how we can user LRANGE in order to get ranges of posts, and render this posts on the screen. The code is simple: +Now it should be pretty clear how we can use LRANGE in order to get ranges of posts, and render these posts on the screen. The code is simple: function showPost($id) { $r = redisLink(); @@ -333,7 +333,7 @@ You can find the code that sets or removes a following/follower relation at foll Making it horizontally scalable --- -Gentle reader, if you reached this point you are already an hero, thank you. Before to talk about scaling horizontally it is worth to check the performances on a single server. Retwis is *amazingly fast*, without any kind of cache. On a very slow and loaded server, apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey asses slow! Go figure with more recent hardware. +Gentle reader, if you reached this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking the performances on a single server. Retwis is *amazingly fast*, without any kind of cache. On a very slow and loaded server, an apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow! Go figure with more recent hardware. So, first of all, probably you will not need more than one server for a lot of applications, even when you have a lot of users. But let's assume we *are* Twitter and need to handle a huge amount of traffic. What to do? 
@@ -344,7 +344,7 @@ The first thing to do is to hash the key and issue the request on different serv server_id = crc32(key) % number_of_servers -This has a lot of problems since if you add one server you need to move too much keys and so on, but this is the general idea even if you use a better hashing scheme like consistent hashing. +This has a lot of problems since if you add one server you need to move too many keys and so on, but this is the general idea even if you use a better hashing scheme like consistent hashing. Ok, are key accesses distributed among the key space? Well, all the user data will be partitioned among different servers. There are no inter-keys operations used (like SINTER, otherwise you need to care that things you want to intersect will end in the same server. *This is why Redis unlike memcached does not force a specific hashing scheme, it's application specific*). Btw there are keys that are accessed more frequently. @@ -353,6 +353,6 @@ Special keys For example every time we post a new message, we *need* to increment the `global:nextPostId` key. How to fix this problem? A Single server will get a lot if increments. The simplest way to handle this is to have a dedicated server just for increments. This is probably an overkill btw unless you have really a lot of traffic. There is another trick. The ID does not really need to be an incremental number, but just *it needs to be unique*. So you can get a random string long enough to be unlikely (almost impossible, if it's md5-size) to collide, and you are done. We successfully eliminated our main problem to make it really horizontally scalable! -There is another one: global:timeline. There is no fix for this, if you need to take something in order you can split among different servers and *then merge* when you need to get the data back, or take it ordered and use a single key. Again if you really have so much posts per second, you can use a single server just for this. 
Remember that with commodity hardware Redis is able to handle 100000 writes for second, that's enough even for Twitter, I guess. +There is another one: global:timeline. There is no fix for this, if you need to take something in order you can split among different servers and *then merge* when you need to get the data back, or take it ordered and use a single key. Again if you really have so much posts per second, you can use a single server just for this. Remember that with commodity hardware Redis is able to handle 100000 writes per second. That's enough even for Twitter, I guess. Please feel free to use the comments below for questions and feedbacks. From 335c90deb8bdf2eeccfcbd24c044d437c4510cb4 Mon Sep 17 00:00:00 2001 From: Nikita Koksharov Date: Sat, 11 Jan 2014 07:15:23 -0800 Subject: [PATCH 0037/2497] Redisson entry added --- clients.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clients.json b/clients.json index e18661b340..108bb3cdd5 100644 --- a/clients.json +++ b/clients.json @@ -176,6 +176,16 @@ "active": true }, + { + "name": "Redisson", + "language": "Java", + "repository": "https://github.com/mrniko/redisson", + "description": "distributed and scalable Java data structures on top of Redis server", + "authors": ["mrniko"], + "recommended": true, + "active": true + }, + { "name": "JRedis", "language": "Java", From 6c192be1997962d27189fe5db3e71239f89fd565 Mon Sep 17 00:00:00 2001 From: "Stuart P. Bentley" Date: Sat, 11 Jan 2014 17:02:05 -0800 Subject: [PATCH 0038/2497] Fix PTTL first version in ttl.md PTTL is first available in 2.6, not 2.8. --- commands/ttl.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/ttl.md b/commands/ttl.md index 17055f4884..15821e1140 100644 --- a/commands/ttl.md +++ b/commands/ttl.md @@ -9,7 +9,7 @@ Starting with Redis 2.8 the return value in case of error changed: * The command returns `-2` if the key does not exist. 
* The command returns `-1` if the key exists but has no associated expire. -See also the `PTTL` command that returns the same information with milliseconds resolution (Only available in Redis 2.8 or greater). +See also the `PTTL` command that returns the same information with milliseconds resolution (Only available in Redis 2.6 or greater). @return From cf613d1359f87e450c66f8c72a1a00a2ff0a2f99 Mon Sep 17 00:00:00 2001 From: "Stuart P. Bentley" Date: Sat, 11 Jan 2014 17:03:42 -0800 Subject: [PATCH 0039/2497] Document 2.8+ negative value behavior in pttl.md I just got bitten by this behavior in my own code. --- commands/pttl.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/commands/pttl.md b/commands/pttl.md index a3d66431a1..4e0807971b 100644 --- a/commands/pttl.md +++ b/commands/pttl.md @@ -2,10 +2,16 @@ Like `TTL` this command returns the remaining time to live of a key that has an expire set, with the sole difference that `TTL` returns the amount of remaining time in seconds while `PTTL` returns it in milliseconds. +In Redis 2.6 or older the command returns `-1` if the key does not exist or if the key exists but has no associated expire. + +Starting with Redis 2.8 the return value in case of error changed: + +* The command returns `-2` if the key does not exist. +* The command returns `-1` if the key exists but has no associated expire. + @return -@integer-reply: Time to live in milliseconds or `-1` when `key` does not exist -or does not have a timeout. +@integer-reply: TTL in milliseconds, or a negative value in order to signal an error (see the description above). @examples From 584dd95882738ee2762040bb6dc2bd3bab20ad88 Mon Sep 17 00:00:00 2001 From: antirez Date: Mon, 13 Jan 2014 16:35:55 +0100 Subject: [PATCH 0040/2497] SENTINEL runtime config API documented. 
--- topics/sentinel.md | 47 +++++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/topics/sentinel.md b/topics/sentinel.md index 0f0a3f6845..21fb99b2c4 100644 --- a/topics/sentinel.md +++ b/topics/sentinel.md @@ -1,8 +1,6 @@ Redis Sentinel Documentation === -**Note:** this page documents the *new* Sentinel implementation that entered the Github repository 21th of November. The old Sentinel implementation is [documented here](http://redis.io/topics/sentinel-old), however using the old implementation is discouraged. - Redis Sentinel is a system designed to help managing Redis instances. It performs the following three tasks: @@ -25,19 +23,14 @@ executable. describes how to use what is already implemented, and may change as the Sentinel implementation evolves. -Redis Sentinel is compatible with Redis 2.4.16 or greater, and Redis 2.6.0 or greater, however it works better if used against Redis instances version 2.8.0 or greater. +Redis Sentinel is compatible with Redis 2.4.16 or greater, and Redis 2.6.0 or greater, however it works better if used with Redis instances version 2.8.0 or greater. Obtaining Sentinel --- -Currently Sentinel is part of the Redis *unstable* branch at github. -To compile it you need to clone the *unstable* branch and compile Redis. -You'll see a `redis-sentinel` executable in your `src` directory. - -Alternatively you can use directly the `redis-server` executable itself, -starting it in Sentinel mode as specified in the next paragraph. +Sentinel is currently developed in the *unstable* branch of the Redis source code at Github. However an updated copy of Sentinel is provided with every patch release of Redis 2.8. -An updated version of Sentinel is also available as part of the Redis 2.8.0 release. +The simplest way to use Sentinel is to download the latest version of Redis 2.8 or to compile Redis latest commit in the *unstable* branch at Github. 
Running Sentinel --- @@ -80,7 +73,7 @@ that is at address 127.0.0.1 and port 6379, with a level of agreement needed to detect this master as failing of 2 sentinels (if the agreement is not reached the automatic failover does not start). -However note that whatever the agreement you specify to detect an instance as not working, a Sentinel requires **the vote from the majority** of the known Sentinels in the system in order to start a failover and reserve a given *configuration Epoch* (that is a version to attach to a new master configuration). +However note that whatever the agreement you specify to detect an instance as not working, a Sentinel requires **the vote from the majority** of the known Sentinels in the system in order to start a failover and obtain a new *configuration Epoch* to assign to the new configuration after the failover. In other words **Sentinel is not able to perform the failover if only a minority of the Sentinel processes are working**. @@ -112,6 +105,8 @@ The other options are described in the rest of this document and documented in the example sentinel.conf file shipped with the Redis distribution. +All the configuration parameters can be modified at runtime using the `SENTINEL` command. See the **Reconfiguring Sentinel at runtime** section for more information. + SDOWN and ODOWN --- @@ -204,12 +199,30 @@ Sentinel commands The following is a list of accepted commands: -* **PING** this command simply returns PONG. -* **SENTINEL masters** show a list of monitored masters and their state. -* **SENTINEL slaves ``** show a list of slaves for this master, and their state. -* **SENTINEL get-master-addr-by-name ``** return the ip and port number of the master with that name. If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted slave. -* **SENTINEL reset ``** this command will reset all the masters with matching name. The pattern argument is a glob-style pattern. 
The reset process clears any previous state in a master (including a failover in progress), and removes every slave and sentinel already discovered and associated with the master. -* **SENTINEL failover ``** force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). +* **PING** This command simply returns PONG. +* **SENTINEL masters** Show a list of monitored masters and their state. +* **SENTINEL master ``** Show the state and info of the specified master. +* **SENTINEL slaves ``** Show a list of slaves for this master, and their state. +* **SENTINEL get-master-addr-by-name ``** Return the ip and port number of the master with that name. If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted slave. +* **SENTINEL reset ``** This command will reset all the masters with matching name. The pattern argument is a glob-style pattern. The reset process clears any previous state in a master (including a failover in progress), and removes every slave and sentinel already discovered and associated with the master. +* **SENTINEL failover ``** Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels (however a new version of the configuration will be published so that the other Sentinels will update their configurations). + +Reconfiguring Sentinel at Runtime +--- + +Starting with Redis version 2.8.4, Sentinel provides an API in order to add, remove, or change the configuration of a given master. Note that if you have multiple sentinels you should apply the changes to all of your instances for Redis Sentinel to work properly. This means that changing the configuration of a single Sentinel does not automatically propagate the changes to the other Sentinels in the network. 
+ +The following is a list of `SENTINEL` sub commands used in order to update the configuration of a Sentinel instance. + +* **SENTINEL MONITOR `` `` `` ``** This command tells the Sentinel to start monitoring a new master with the specified name, ip, port, and quorum. It is identical to the `sentinel monitor` configuration directive in `sentinel.conf` configuration file, with the difference that you can't use a hostname as `ip`, but you need to provide an IPv4 or IPv6 address. +* **SENTINEL REMOVE ``** is used in order to remove the specified master: the master will no longer be monitored, and will totally be removed from the internal state of the Sentinel, so it will no longer be listed by `SENTINEL masters` and so forth. +* **SENTINEL SET `` `